tag_sja1105.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>
#include "dsa_priv.h"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH		BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA		BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY		BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER		BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)		((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)	((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)		((x) & GENMASK(3, 0))

/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)		(((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC		BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS		BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC		BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER		BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)		(((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)		((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)	((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)		(((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)		(((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)		(((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)		(((x) << 1) & GENMASK(11, 1))

#define SJA1110_META_TSTAMP_SIZE		10

#define SJA1110_HEADER_LEN			4
#define SJA1110_RX_TRAILER_LEN			13
#define SJA1110_TX_TRAILER_LEN			4
#define SJA1110_MAX_PADDING_LEN			15

#define SJA1105_HWTS_RX_EN			0

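/* Worked example for the TX trailer macros above (illustrative values,
 * not taken from the datasheet): a frame sent with PCP 5 from switch 0
 * towards port 2 yields
 *
 *	SJA1110_TX_TRAILER_PRIO(5)           = 5 << 21 = 0x00a00000
 *	SJA1110_TX_TRAILER_SWITCHID(0)       = 0 << 12 = 0x00000000
 *	SJA1110_TX_TRAILER_DESTPORTS(BIT(2)) = 4 << 1  = 0x00000008
 *	                                     OR'ed     = 0x00a00008
 */
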
struct sja1105_tagger_private {
	struct sja1105_tagger_data data; /* Must be first */
	unsigned long state;
	/* Protects concurrent access to the meta state machine
	 * from taggers running on multiple ports on SMP systems
	 */
	spinlock_t meta_lock;
	struct sk_buff *stampable_skb;
	struct kthread_worker *xmit_worker;
};

static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
	return ds->tagger_data;
}

/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
		return false;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_A)
		return true;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_B)
		return true;
	return false;
}

struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};

static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
	packing(buf,     &meta->tstamp,      31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4,  7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3,  7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port,  7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,    7, 0, 1, UNPACK, 0);
}

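/* Sketch of the meta frame payload that sja1105_meta_unpack() decodes.
 * This layout is inferred from the unpacking offsets above, not quoted
 * from UM10944.pdf:
 *
 *	bytes 0..3: partial RX timestamp (bits 31:0; E/T zeroes bits 31:24)
 *	byte 4:     byte 4 of the timestamped frame's original DMAC
 *	byte 5:     byte 3 of the timestamped frame's original DMAC
 *	byte 6:     source port of the timestamped frame
 *	byte 7:     switch ID of the timestamped frame
 */
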
static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 smac = ether_addr_to_u64(hdr->h_source);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (smac != SJA1105_META_SMAC)
		return false;
	if (dmac != SJA1105_META_DMAC)
		return false;
	if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
		return false;
	return true;
}

/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
	struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
	void (*xmit_work_fn)(struct kthread_work *work);
	struct sja1105_deferred_xmit_work *xmit_work;
	struct kthread_worker *xmit_worker;

	xmit_work_fn = tagger_data->xmit_work_fn;
	xmit_worker = priv->xmit_worker;

	if (!xmit_work_fn || !xmit_worker)
		return NULL;

	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return NULL;

	kthread_init_work(&xmit_work->work, xmit_work_fn);
	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	xmit_work->dp = dp;
	xmit_work->skb = skb_get(skb);

	kthread_queue_work(xmit_worker, &xmit_work->work);

	return NULL;
}

/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* Since VLAN awareness is global, then if this port is VLAN-unaware,
	 * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
	 */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
	 * we're sure about that). It may not be on this port though, so we
	 * need to find it.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *br = dsa_port_bridge_dev_get(other_dp);

		if (!br)
			continue;

		/* An error is returned only if CONFIG_BRIDGE_VLAN_FILTERING
		 * is disabled, which seems pointless to handle, as our port
		 * cannot become VLAN-aware in that case.
		 */
		br_vlan_get_proto(br, &proto);

		return proto;
	}

	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}

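/* Informal summary of the TPID selection above (0xdadb is the value of
 * ETH_P_SJA1105; a bridge's VLAN protocol is normally ETH_P_8021Q or
 * ETH_P_8021AD):
 *
 *	port VLAN-unaware            -> ETH_P_SJA1105, the tag_8021q TPID
 *	port under VLAN-aware bridge -> the bridge's own VLAN protocol
 */
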
static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	unsigned int bridge_num = dsa_port_bridge_num_get(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 tx_vid;

	/* If the port is under a VLAN-aware bridge, just slide the
	 * VLAN-tagged packet into the FDB and hope for the best.
	 * This works because we support a single VLAN-aware bridge
	 * across the entire dst, and its VLANs cannot be shared with
	 * any standalone port.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* If the port is under a VLAN-unaware bridge, use an imprecise
	 * TX VLAN that targets the bridge's entire broadcast domain,
	 * instead of just the specific port.
	 */
	tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA master.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If VLAN tag is in hwaccel area, move it to the payload
	 * to deal with both cases uniformly and to ensure that
	 * the VLANs are added in the right order.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* If skb is already VLAN-tagged, leave that VLAN ID in place */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}

static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting control packets is done using in-band control
	 * extensions, while data packets are transmitted using
	 * tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}

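/* Rough on-the-wire layout of a control frame built by sja1110_xmit(), as
 * far as can be read from the code above (illustrative, not a datasheet
 * excerpt):
 *
 *	[DMAC][SMAC]
 *	[EtherType 0xdadc][16-bit header: HOST_TO_SWITCH | HAS_TRAILER |
 *	                   TRAILER_POS, counted from the DMAC]
 *	[VLAN tag from sja1105_pvid_tag_control_pkt()][payload]
 *	[32-bit trailer: TSTAMP_ID | PRIO | SWITCHID | DESTPORTS]
 */
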
static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;

		priv = sja1105_tagger_private(ds);

		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&priv->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (priv->stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(priv->stampable_skb);
		}

		/* Hold a reference to avoid dsa_switch_rcv
		 * from freeing the skb.
		 */
		priv->stampable_skb = skb_get(skb);
		spin_unlock(&priv->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;
		struct sk_buff *stampable_skb;

		priv = sja1105_tagger_private(ds);

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			return NULL;

		spin_lock(&priv->meta_lock);

		stampable_skb = priv->stampable_skb;
		priv->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&priv->meta_lock);
	}

	return skb;
}

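/* Example run of the state machine above (hypothetical traffic, for
 * illustration only):
 *
 *	1. A link-local frame arrives on port 2 -> it is buffered in
 *	   stampable_skb and NULL is returned to DSA.
 *	2. Its meta follow-up arrives on the same port -> the partial
 *	   timestamp and DMAC bytes are copied onto the buffered frame,
 *	   the meta frame is freed, and the original frame resumes its
 *	   path up the stack.
 */
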
static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
}

static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	if (on)
		set_bit(SJA1105_HWTS_RX_EN, &priv->state);
	else
		clear_bit(SJA1105_HWTS_RX_EN, &priv->state);

	/* Initialize the meta state machine to a known state */
	if (!priv->stampable_skb)
		return;

	kfree_skb(priv->stampable_skb);
	priv->stampable_skb = NULL;
}

static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
	u16 tpid = ntohs(eth_hdr(skb)->h_proto);

	return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
	       skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
	return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

/* If the VLAN in the packet is a tag_8021q one, set @source_port and
 * @switch_id and strip the header. Otherwise set @vid and keep it in the
 * packet.
 */
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
			     int *switch_id, int *vbid, u16 *vid)
{
	struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
	u16 vlan_tci;

	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
		return dsa_8021q_rcv(skb, source_port, switch_id, vbid);

	/* Try our best with imprecise RX */
	*vid = vlan_tci & VLAN_VID_MASK;
}

static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		return NULL;
	}

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}

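/* Summary of the RX classification in sja1105_rcv() above (restates the
 * code, adds no new behavior):
 *
 *	tag_8021q VLAN present -> port/switch ID from the VID, or an
 *	                          imprecise match on the bridge VID
 *	link-local DMAC        -> port/switch ID from DMAC bytes 3/4
 *	                          (incl_srcpt); may await a meta follow-up
 *	meta frame             -> port/switch ID from the meta payload
 *	anything else          -> dropped
 */
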
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct sja1105_tagger_data *tagger_data;
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

	tagger_data = sja1105_tagger_data(ds);
	if (!tagger_data->meta_tstamp_handler)
		return NULL;

	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
						 tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame, we've consumed the timestamps it contained */
	return NULL;
}

static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							    int *source_port,
							    int *switch_id,
							    bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
	 * what we need because the caller has checked the EtherType (which is
	 * located 2 bytes back) and we just need a pointer to the header that
	 * comes afterwards.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame, we have a trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so we need to use packing()
		 * to get it
		 */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address. Right now skb->data
		 * is still as set by the DSA master, so to trim away the
		 * padding and trailer we need to account for the fact that
		 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
		 */
		pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
	/* Trap-to-host frame, no timestamp trailer */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	/* Advance skb->data past the DSA header */
	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header
	 * so that eth_hdr() continues to work properly.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}

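/* Decoding example for the RX header above, with a made-up value:
 * rx_header = 0x2012 has only HOST_ONLY (bit 13) set, so it is a
 * trap-to-host frame with
 *
 *	SJA1110_RX_HEADER_SRC_PORT(0x2012)  = (0x12 & 0xf0) >> 4 = 1
 *	SJA1110_RX_HEADER_SWITCH_ID(0x2012) =  0x12 & 0x0f       = 2
 */
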
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (unlikely(sja1105_is_link_local(skb)))
		return;

	dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* Management frames have 2 DSA tags on RX, so the needed_headroom we
	 * declared is fine for the generic dissector adjustment procedure.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return dsa_tag_generic_flow_dissect(skb, proto, offset);

	/* For the rest, there is a single DSA tag, the tag_8021q one */
	*offset = VLAN_HLEN;
	*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

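/* The arithmetic in sja1110_flow_dissect() spelled out: a tag_8021q data
 * frame carries a single 4-byte VLAN tag, so the dissector skips
 * VLAN_HLEN (4) bytes, and the encapsulated protocol is the second
 * 16-bit word of the tag, i.e. ((__be16 *)skb->data)[1].
 */
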
static void sja1105_disconnect(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = ds->tagger_data;

	kthread_destroy_worker(priv->xmit_worker);
	kfree(priv);
	ds->tagger_data = NULL;
}

static int sja1105_connect(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data;
	struct sja1105_tagger_private *priv;
	struct kthread_worker *xmit_worker;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->meta_lock);

	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
					    ds->dst->index, ds->index);
	if (IS_ERR(xmit_worker)) {
		err = PTR_ERR(xmit_worker);
		kfree(priv);
		return err;
	}

	priv->xmit_worker = xmit_worker;
	/* Export functions for switch driver use */
	tagger_data = &priv->data;
	tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
	tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
	ds->tagger_data = priv;

	return 0;
}

static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = "sja1105",
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);

static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = "sja1110",
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");
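
/* Hypothetical sketch of the driver-side counterpart (it lives in the
 * sja1105 switch driver, not in this file; the callback names below are
 * assumptions based on the references above, not verified signatures).
 * After DSA calls .connect(), the switch driver fills in the hooks
 * exported through sja1105_tagger_data():
 *
 *	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
 *
 *	tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
 *	tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
 */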