hisi_femac.c (24176B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hisilicon Fast Ethernet MAC Driver
 *
 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
 */

#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* MAC control register list */
#define MAC_PORTSEL                     0x0200
#define MAC_PORTSEL_STAT_CPU            BIT(0)
#define MAC_PORTSEL_RMII                BIT(1)
#define MAC_PORTSET                     0x0208
#define MAC_PORTSET_DUPLEX_FULL         BIT(0)
#define MAC_PORTSET_LINKED              BIT(1)
#define MAC_PORTSET_SPEED_100M          BIT(2)
#define MAC_SET                         0x0210
#define MAX_FRAME_SIZE                  1600
#define MAX_FRAME_SIZE_MASK             GENMASK(10, 0)
#define BIT_PAUSE_EN                    BIT(18)
#define RX_COALESCE_SET                 0x0340
#define RX_COALESCED_FRAME_OFFSET       24
#define RX_COALESCED_FRAMES             8
#define RX_COALESCED_TIMER              0x74
#define QLEN_SET                        0x0344
#define RX_DEPTH_OFFSET                 8
#define MAX_HW_FIFO_DEPTH               64
#define HW_TX_FIFO_DEPTH                12
#define HW_RX_FIFO_DEPTH                (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
#define IQFRM_DES                       0x0354
#define RX_FRAME_LEN_MASK               GENMASK(11, 0)
#define IQ_ADDR                         0x0358
#define EQ_ADDR                         0x0360
#define EQFRM_LEN                       0x0364
#define ADDRQ_STAT                      0x036C
#define TX_CNT_INUSE_MASK               GENMASK(5, 0)
#define BIT_TX_READY                    BIT(24)
#define BIT_RX_READY                    BIT(25)
/* global control register list */
#define GLB_HOSTMAC_L32                 0x0000
#define GLB_HOSTMAC_H16                 0x0004
#define GLB_SOFT_RESET                  0x0008
#define SOFT_RESET_ALL                  BIT(0)
#define GLB_FWCTRL                      0x0010
#define FWCTRL_VLAN_ENABLE              BIT(0)
#define FWCTRL_FW2CPU_ENA               BIT(5)
#define FWCTRL_FWALL2CPU                BIT(7)
#define GLB_MACTCTRL                    0x0014
#define MACTCTRL_UNI2CPU                BIT(1)
#define MACTCTRL_MULTI2CPU              BIT(3)
#define MACTCTRL_BROAD2CPU              BIT(5)
#define MACTCTRL_MACT_ENA               BIT(7)
#define GLB_IRQ_STAT                    0x0030
#define GLB_IRQ_ENA                     0x0034
#define IRQ_ENA_PORT0_MASK              GENMASK(7, 0)
#define IRQ_ENA_PORT0                   BIT(18)
#define IRQ_ENA_ALL                     BIT(19)
#define GLB_IRQ_RAW                     0x0038
#define IRQ_INT_RX_RDY                  BIT(0)
#define IRQ_INT_TX_PER_PACKET           BIT(1)
#define IRQ_INT_TX_FIFO_EMPTY           BIT(6)
#define IRQ_INT_MULTI_RXRDY             BIT(7)
#define DEF_INT_MASK                    (IRQ_INT_MULTI_RXRDY | \
                                        IRQ_INT_TX_PER_PACKET | \
                                        IRQ_INT_TX_FIFO_EMPTY)
#define GLB_MAC_L32_BASE                0x0100
#define GLB_MAC_H16_BASE                0x0104
#define MACFLT_HI16_MASK                GENMASK(15, 0)
#define BIT_MACFLT_ENA                  BIT(17)
#define BIT_MACFLT_FW2CPU               BIT(21)
#define GLB_MAC_H16(reg)                (GLB_MAC_H16_BASE + ((reg) * 0x8))
#define GLB_MAC_L32(reg)                (GLB_MAC_L32_BASE + ((reg) * 0x8))
#define MAX_MAC_FILTER_NUM              8
#define MAX_UNICAST_ADDRESSES           2
#define MAX_MULTICAST_ADDRESSES         (MAX_MAC_FILTER_NUM - \
                                        MAX_UNICAST_ADDRESSES)
/* software tx and rx queue number, should be power of 2 */
#define TXQ_NUM                         64
#define RXQ_NUM                         128
#define FEMAC_POLL_WEIGHT               16

#define PHY_RESET_DELAYS_PROPERTY       "hisilicon,phy-reset-delays-us"

enum phy_reset_delays {
        PRE_DELAY,
        PULSE,
        POST_DELAY,
        DELAYS_NUM,
};

struct hisi_femac_queue {
        struct sk_buff **skb;
        dma_addr_t *dma_phys;
        int num;
        unsigned int head;
        unsigned int tail;
};

struct hisi_femac_priv {
        void __iomem *port_base;
        void __iomem *glb_base;
        struct clk *clk;
        struct reset_control *mac_rst;
        struct reset_control *phy_rst;
        u32 phy_reset_delays[DELAYS_NUM];
        u32 link_status;

        struct device *dev;
        struct net_device *ndev;

        struct hisi_femac_queue txq;
        struct hisi_femac_queue rxq;
        u32 tx_fifo_used_cnt;
        struct napi_struct napi;
};

static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
{
        u32 val;

        val = readl(priv->glb_base + GLB_IRQ_ENA);
        writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
}

static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
{
        u32 val;

        val = readl(priv->glb_base + GLB_IRQ_ENA);
        writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
}

static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
                                    struct sk_buff *skb, unsigned int pos)
{
        dma_addr_t dma_addr;

        dma_addr = priv->txq.dma_phys[pos];
        dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
}

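/* Reclaim completed TX buffers: the TX_CNT_INUSE field of ADDRQ_STAT
 * reports how many frames are still queued in the hardware TX FIFO, so any
 * software-tracked frames beyond that count have been transmitted and can
 * be unmapped and freed.
 */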
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
        struct sk_buff *skb;
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct hisi_femac_queue *txq = &priv->txq;
        unsigned int bytes_compl = 0, pkts_compl = 0;
        u32 val;

        netif_tx_lock(dev);

        val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
        while (val < priv->tx_fifo_used_cnt) {
                skb = txq->skb[txq->tail];
                if (unlikely(!skb)) {
                        netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
                                   val, priv->tx_fifo_used_cnt);
                        break;
                }
                hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
                pkts_compl++;
                bytes_compl += skb->len;
                dev_kfree_skb_any(skb);

                priv->tx_fifo_used_cnt--;

                val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
                txq->skb[txq->tail] = NULL;
                txq->tail = (txq->tail + 1) % txq->num;
        }

        netdev_completed_queue(dev, pkts_compl, bytes_compl);

        if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
                netif_wake_queue(dev);

        netif_tx_unlock(dev);
}

static void hisi_femac_adjust_link(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct phy_device *phy = dev->phydev;
        u32 status = 0;

        if (phy->link)
                status |= MAC_PORTSET_LINKED;
        if (phy->duplex == DUPLEX_FULL)
                status |= MAC_PORTSET_DUPLEX_FULL;
        if (phy->speed == SPEED_100)
                status |= MAC_PORTSET_SPEED_100M;

        if ((status != priv->link_status) &&
            ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
                writel(status, priv->port_base + MAC_PORTSET);
                priv->link_status = status;
                phy_print_status(phy);
        }
}

static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
{
        struct hisi_femac_queue *rxq = &priv->rxq;
        struct sk_buff *skb;
        u32 pos;
        u32 len = MAX_FRAME_SIZE;
        dma_addr_t addr;

        pos = rxq->head;
        while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
                if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
                        break;
                if (unlikely(rxq->skb[pos])) {
                        netdev_err(priv->ndev, "err skb[%d]=%p\n",
                                   pos, rxq->skb[pos]);
                        break;
                }
                skb = netdev_alloc_skb_ip_align(priv->ndev, len);
                if (unlikely(!skb))
                        break;

                addr = dma_map_single(priv->dev, skb->data, len,
                                      DMA_FROM_DEVICE);
                if (dma_mapping_error(priv->dev, addr)) {
                        dev_kfree_skb_any(skb);
                        break;
                }
                rxq->dma_phys[pos] = addr;
                rxq->skb[pos] = skb;
                writel(addr, priv->port_base + IQ_ADDR);
                pos = (pos + 1) % rxq->num;
        }
        rxq->head = pos;
}

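/* Receive up to 'limit' frames: for each ready frame the length is read
 * from IQFRM_DES, the pre-mapped skb is taken from the software RX ring and
 * passed up through NAPI/GRO, and the ring is refilled with freshly mapped
 * buffers afterwards.
 */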
static int hisi_femac_rx(struct net_device *dev, int limit)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct hisi_femac_queue *rxq = &priv->rxq;
        struct sk_buff *skb;
        dma_addr_t addr;
        u32 rx_pkt_info, pos, len, rx_pkts_num = 0;

        pos = rxq->tail;
        while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
                rx_pkt_info = readl(priv->port_base + IQFRM_DES);
                len = rx_pkt_info & RX_FRAME_LEN_MASK;
                len -= ETH_FCS_LEN;

                /* tell hardware we will deal with this packet */
                writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);

                rx_pkts_num++;

                skb = rxq->skb[pos];
                if (unlikely(!skb)) {
                        netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
                        break;
                }
                rxq->skb[pos] = NULL;

                addr = rxq->dma_phys[pos];
                dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
                                 DMA_FROM_DEVICE);
                skb_put(skb, len);
                if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                        netdev_err(dev, "rcv len err, len = %d\n", skb->len);
                        dev->stats.rx_errors++;
                        dev->stats.rx_length_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&priv->napi, skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;
next:
                pos = (pos + 1) % rxq->num;
                if (rx_pkts_num >= limit)
                        break;
        }
        rxq->tail = pos;

        hisi_femac_rx_refill(priv);

        return rx_pkts_num;
}

static int hisi_femac_poll(struct napi_struct *napi, int budget)
{
        struct hisi_femac_priv *priv = container_of(napi,
                                        struct hisi_femac_priv, napi);
        struct net_device *dev = priv->ndev;
        int work_done = 0, task = budget;
        int ints, num;

        do {
                hisi_femac_xmit_reclaim(dev);
                num = hisi_femac_rx(dev, task);
                work_done += num;
                task -= num;
                if (work_done >= budget)
                        break;

                ints = readl(priv->glb_base + GLB_IRQ_RAW);
                writel(ints & DEF_INT_MASK,
                       priv->glb_base + GLB_IRQ_RAW);
        } while (ints & DEF_INT_MASK);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                hisi_femac_irq_enable(priv, DEF_INT_MASK &
                                        (~IRQ_INT_TX_PER_PACKET));
        }

        return work_done;
}

static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
{
        int ints;
        struct net_device *dev = (struct net_device *)dev_id;
        struct hisi_femac_priv *priv = netdev_priv(dev);

        ints = readl(priv->glb_base + GLB_IRQ_RAW);

        if (likely(ints & DEF_INT_MASK)) {
                writel(ints & DEF_INT_MASK,
                       priv->glb_base + GLB_IRQ_RAW);
                hisi_femac_irq_disable(priv, DEF_INT_MASK);
                napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}

static int hisi_femac_init_queue(struct device *dev,
                                 struct hisi_femac_queue *queue,
                                 unsigned int num)
{
        queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
                                  GFP_KERNEL);
        if (!queue->skb)
                return -ENOMEM;

        queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
                                       GFP_KERNEL);
        if (!queue->dma_phys)
                return -ENOMEM;

        queue->num = num;
        queue->head = 0;
        queue->tail = 0;

        return 0;
}

static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
{
        int ret;

        ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
        if (ret)
                return ret;

        ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
        if (ret)
                return ret;

        priv->tx_fifo_used_cnt = 0;

        return 0;
}

static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
{
        struct hisi_femac_queue *txq = &priv->txq;
        struct hisi_femac_queue *rxq = &priv->rxq;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        u32 pos;

        pos = rxq->tail;
        while (pos != rxq->head) {
                skb = rxq->skb[pos];
                if (unlikely(!skb)) {
                        netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
                                   pos, rxq->head);
                        continue;
                }

                dma_addr = rxq->dma_phys[pos];
                dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
                                 DMA_FROM_DEVICE);

                dev_kfree_skb_any(skb);
                rxq->skb[pos] = NULL;
                pos = (pos + 1) % rxq->num;
        }
        rxq->tail = pos;

        pos = txq->tail;
        while (pos != txq->head) {
                skb = txq->skb[pos];
                if (unlikely(!skb)) {
                        netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
                                   pos, txq->head);
                        continue;
                }
                hisi_femac_tx_dma_unmap(priv, skb, pos);
                dev_kfree_skb_any(skb);
                txq->skb[pos] = NULL;
                pos = (pos + 1) % txq->num;
        }
        txq->tail = pos;
        priv->tx_fifo_used_cnt = 0;
}

static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
                                      const unsigned char *mac)
{
        u32 reg;

        reg = mac[1] | (mac[0] << 8);
        writel(reg, priv->glb_base + GLB_HOSTMAC_H16);

        reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
        writel(reg, priv->glb_base + GLB_HOSTMAC_L32);

        return 0;
}

static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
{
        u32 val;

        val = readl(priv->glb_base + GLB_SOFT_RESET);
        val |= SOFT_RESET_ALL;
        writel(val, priv->glb_base + GLB_SOFT_RESET);

        usleep_range(500, 800);

        val &= ~SOFT_RESET_ALL;
        writel(val, priv->glb_base + GLB_SOFT_RESET);

        return 0;
}

static int hisi_femac_net_open(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);

        hisi_femac_port_reset(priv);
        hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
        hisi_femac_rx_refill(priv);

        netif_carrier_off(dev);
        netdev_reset_queue(dev);
        netif_start_queue(dev);
        napi_enable(&priv->napi);

        priv->link_status = 0;
        if (dev->phydev)
                phy_start(dev->phydev);

        writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
        hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);

        return 0;
}

static int hisi_femac_net_close(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);

        hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);

        if (dev->phydev)
                phy_stop(dev->phydev);

        netif_stop_queue(dev);
        napi_disable(&priv->napi);

        hisi_femac_free_skb_rings(priv);

        return 0;
}

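/* Transmit one linear skb: the buffer is DMA-mapped and its bus address and
 * frame length (skb->len + ETH_FCS_LEN) are written to EQ_ADDR/EQFRM_LEN.
 * Completed frames are unmapped and freed later by hisi_femac_xmit_reclaim()
 * from the NAPI poll loop.
 */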
static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct hisi_femac_queue *txq = &priv->txq;
        dma_addr_t addr;
        u32 val;

        val = readl(priv->port_base + ADDRQ_STAT);
        val &= BIT_TX_READY;
        if (!val) {
                hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
                dev->stats.tx_dropped++;
                dev->stats.tx_fifo_errors++;
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
                                 txq->num))) {
                hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
                dev->stats.tx_dropped++;
                dev->stats.tx_fifo_errors++;
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        addr = dma_map_single(priv->dev, skb->data,
                              skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, addr))) {
                dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
        txq->dma_phys[txq->head] = addr;

        txq->skb[txq->head] = skb;
        txq->head = (txq->head + 1) % txq->num;

        writel(addr, priv->port_base + EQ_ADDR);
        writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);

        priv->tx_fifo_used_cnt++;

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        netdev_sent_queue(dev, skb->len);

        return NETDEV_TX_OK;
}

static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);
        struct sockaddr *skaddr = p;

        if (!is_valid_ether_addr(skaddr->sa_data))
                return -EADDRNOTAVAIL;

        eth_hw_addr_set(dev, skaddr->sa_data);
        dev->addr_assign_type &= ~NET_ADDR_RANDOM;

        hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);

        return 0;
}

static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
                                             unsigned int reg_n, bool enable)
{
        u32 val;

        val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
        if (enable)
                val |= BIT_MACFLT_ENA;
        else
                val &= ~BIT_MACFLT_ENA;
        writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
}

static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
                                          unsigned char *addr,
                                          unsigned int reg_n)
{
        unsigned int high, low;
        u32 val;

        high = GLB_MAC_H16(reg_n);
        low = GLB_MAC_L32(reg_n);

        val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
        writel(val, priv->glb_base + low);

        val = readl(priv->glb_base + high);
        val &= ~MACFLT_HI16_MASK;
        val |= ((addr[0] << 8) | addr[1]);
        val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
        writel(val, priv->glb_base + high);
}

static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
                                        bool promisc_mode)
{
        u32 val;

        val = readl(priv->glb_base + GLB_FWCTRL);
        if (promisc_mode)
                val |= FWCTRL_FWALL2CPU;
        else
                val &= ~FWCTRL_FWALL2CPU;
        writel(val, priv->glb_base + GLB_FWCTRL);
}

/* Handle multiple multicast addresses (perfect filtering) */
static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
{
        struct net_device *dev = priv->ndev;
        u32 val;

        val = readl(priv->glb_base + GLB_MACTCTRL);
        if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
            (dev->flags & IFF_ALLMULTI)) {
                val |= MACTCTRL_MULTI2CPU;
        } else {
                int reg = MAX_UNICAST_ADDRESSES;
                int i;
                struct netdev_hw_addr *ha;

                for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
                        hisi_femac_enable_hw_addr_filter(priv, i, false);

                netdev_for_each_mc_addr(ha, dev) {
                        hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
                        reg++;
                }
                val &= ~MACTCTRL_MULTI2CPU;
        }
        writel(val, priv->glb_base + GLB_MACTCTRL);
}

/* Handle multiple unicast addresses (perfect filtering) */
static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
{
        struct net_device *dev = priv->ndev;
        u32 val;

        val = readl(priv->glb_base + GLB_MACTCTRL);
        if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
                val |= MACTCTRL_UNI2CPU;
        } else {
                int reg = 0;
                int i;
                struct netdev_hw_addr *ha;

                for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
                        hisi_femac_enable_hw_addr_filter(priv, i, false);

                netdev_for_each_uc_addr(ha, dev) {
                        hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
                        reg++;
                }
                val &= ~MACTCTRL_UNI2CPU;
        }
        writel(val, priv->glb_base + GLB_MACTCTRL);
}

static void hisi_femac_net_set_rx_mode(struct net_device *dev)
{
        struct hisi_femac_priv *priv = netdev_priv(dev);

        if (dev->flags & IFF_PROMISC) {
                hisi_femac_set_promisc_mode(priv, true);
        } else {
                hisi_femac_set_promisc_mode(priv, false);
                hisi_femac_set_mc_addr_filter(priv);
                hisi_femac_set_uc_addr_filter(priv);
        }
}

static const struct ethtool_ops hisi_femac_ethtools_ops = {
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops hisi_femac_netdev_ops = {
        .ndo_open               = hisi_femac_net_open,
        .ndo_stop               = hisi_femac_net_close,
        .ndo_start_xmit         = hisi_femac_net_xmit,
        .ndo_eth_ioctl          = phy_do_ioctl_running,
        .ndo_set_mac_address    = hisi_femac_set_mac_address,
        .ndo_set_rx_mode        = hisi_femac_net_set_rx_mode,
};

static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
{
        reset_control_assert(priv->mac_rst);
        reset_control_deassert(priv->mac_rst);
}

static void hisi_femac_sleep_us(u32 time_us)
{
        u32 time_ms;

        if (!time_us)
                return;

        time_ms = DIV_ROUND_UP(time_us, 1000);
        if (time_ms < 20)
                usleep_range(time_us, time_us + 500);
        else
                msleep(time_ms);
}

static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
{
        /* To make sure the PHY hardware reset succeeds, keep the PHY in
         * the deasserted state first and then perform the complete reset
         * sequence.
         */
        reset_control_deassert(priv->phy_rst);
        hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);

        reset_control_assert(priv->phy_rst);
        /* delay long enough for the reset to take effect; the required
         * time depends on the PHY hardware
         */
        hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
        reset_control_deassert(priv->phy_rst);
        /* delay to ensure the PHY is ready for later MDIO access */
        hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
}

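/* One-time port setup: select software-managed link status (and RMII mode
 * when used), clear and mask the port interrupts, enable frame forwarding
 * to the CPU, and program the maximum frame size, RX interrupt coalescing
 * and the hardware FIFO depths.
 */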
static void hisi_femac_port_init(struct hisi_femac_priv *priv)
{
        u32 val;

        /* MAC gets link status info and phy mode by software config */
        val = MAC_PORTSEL_STAT_CPU;
        if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
                val |= MAC_PORTSEL_RMII;
        writel(val, priv->port_base + MAC_PORTSEL);

        /* clear all interrupt status */
        writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
        hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);

        val = readl(priv->glb_base + GLB_FWCTRL);
        val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
        val |= FWCTRL_FW2CPU_ENA;
        writel(val, priv->glb_base + GLB_FWCTRL);

        val = readl(priv->glb_base + GLB_MACTCTRL);
        val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
        writel(val, priv->glb_base + GLB_MACTCTRL);

        val = readl(priv->port_base + MAC_SET);
        val &= ~MAX_FRAME_SIZE_MASK;
        val |= MAX_FRAME_SIZE;
        writel(val, priv->port_base + MAC_SET);

        val = RX_COALESCED_TIMER |
                (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
        writel(val, priv->port_base + RX_COALESCE_SET);

        val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
        writel(val, priv->port_base + QLEN_SET);
}

static int hisi_femac_drv_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct net_device *ndev;
        struct hisi_femac_priv *priv;
        struct phy_device *phy;
        int ret;

        ndev = alloc_etherdev(sizeof(*priv));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);

        priv = netdev_priv(ndev);
        priv->dev = dev;
        priv->ndev = ndev;

        priv->port_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->port_base)) {
                ret = PTR_ERR(priv->port_base);
                goto out_free_netdev;
        }

        priv->glb_base = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(priv->glb_base)) {
                ret = PTR_ERR(priv->glb_base);
                goto out_free_netdev;
        }

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(dev, "failed to get clk\n");
                ret = -ENODEV;
                goto out_free_netdev;
        }

        ret = clk_prepare_enable(priv->clk);
        if (ret) {
                dev_err(dev, "failed to enable clk %d\n", ret);
                goto out_free_netdev;
        }

        priv->mac_rst = devm_reset_control_get(dev, "mac");
        if (IS_ERR(priv->mac_rst)) {
                ret = PTR_ERR(priv->mac_rst);
                goto out_disable_clk;
        }
        hisi_femac_core_reset(priv);

        priv->phy_rst = devm_reset_control_get(dev, "phy");
        if (IS_ERR(priv->phy_rst)) {
                priv->phy_rst = NULL;
        } else {
                ret = of_property_read_u32_array(node,
                                                 PHY_RESET_DELAYS_PROPERTY,
                                                 priv->phy_reset_delays,
                                                 DELAYS_NUM);
                if (ret)
                        goto out_disable_clk;
                hisi_femac_phy_reset(priv);
        }

        phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
        if (!phy) {
                dev_err(dev, "connect to PHY failed!\n");
                ret = -ENODEV;
                goto out_disable_clk;
        }

        phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
                           (unsigned long)phy->phy_id,
                           phy_modes(phy->interface));

        ret = of_get_ethdev_address(node, ndev);
        if (ret) {
                eth_hw_addr_random(ndev);
                dev_warn(dev, "using random MAC address %pM\n",
                         ndev->dev_addr);
        }

        ndev->watchdog_timeo = 6 * HZ;
        ndev->priv_flags |= IFF_UNICAST_FLT;
        ndev->netdev_ops = &hisi_femac_netdev_ops;
        ndev->ethtool_ops = &hisi_femac_ethtools_ops;
        netif_napi_add_weight(ndev, &priv->napi, hisi_femac_poll,
                              FEMAC_POLL_WEIGHT);

        hisi_femac_port_init(priv);

        ret = hisi_femac_init_tx_and_rx_queues(priv);
        if (ret)
                goto out_disconnect_phy;

        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq <= 0) {
                ret = -ENODEV;
                goto out_disconnect_phy;
        }

        ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
                               IRQF_SHARED, pdev->name, ndev);
        if (ret) {
                dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
                goto out_disconnect_phy;
        }

        ret = register_netdev(ndev);
        if (ret) {
                dev_err(dev, "register_netdev failed!\n");
                goto out_disconnect_phy;
        }

        return ret;

out_disconnect_phy:
        netif_napi_del(&priv->napi);
        phy_disconnect(phy);
out_disable_clk:
        clk_disable_unprepare(priv->clk);
out_free_netdev:
        free_netdev(ndev);

        return ret;
}

static int hisi_femac_drv_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hisi_femac_priv *priv = netdev_priv(ndev);

        netif_napi_del(&priv->napi);
        unregister_netdev(ndev);

        phy_disconnect(ndev->phydev);
        clk_disable_unprepare(priv->clk);
        free_netdev(ndev);

        return 0;
}

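/* Platform-bus suspend/resume hooks: suspend closes a running interface and
 * gates the clock; resume re-enables the clock, re-runs the PHY reset and
 * port init as needed, and reopens the interface.
 */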
#ifdef CONFIG_PM
static int hisi_femac_drv_suspend(struct platform_device *pdev,
                                  pm_message_t state)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hisi_femac_priv *priv = netdev_priv(ndev);

        disable_irq(ndev->irq);
        if (netif_running(ndev)) {
                hisi_femac_net_close(ndev);
                netif_device_detach(ndev);
        }

        clk_disable_unprepare(priv->clk);

        return 0;
}

static int hisi_femac_drv_resume(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hisi_femac_priv *priv = netdev_priv(ndev);

        clk_prepare_enable(priv->clk);
        if (priv->phy_rst)
                hisi_femac_phy_reset(priv);

        if (netif_running(ndev)) {
                hisi_femac_port_init(priv);
                hisi_femac_net_open(ndev);
                netif_device_attach(ndev);
        }
        enable_irq(ndev->irq);

        return 0;
}
#endif

static const struct of_device_id hisi_femac_match[] = {
        {.compatible = "hisilicon,hisi-femac-v1",},
        {.compatible = "hisilicon,hisi-femac-v2",},
        {.compatible = "hisilicon,hi3516cv300-femac",},
        {},
};

MODULE_DEVICE_TABLE(of, hisi_femac_match);

static struct platform_driver hisi_femac_driver = {
        .driver = {
                .name = "hisi-femac",
                .of_match_table = hisi_femac_match,
        },
        .probe = hisi_femac_drv_probe,
        .remove = hisi_femac_drv_remove,
#ifdef CONFIG_PM
        .suspend = hisi_femac_drv_suspend,
        .resume = hisi_femac_drv_resume,
#endif
};

module_platform_driver(hisi_femac_driver);

MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hisi-femac");