imx_fec.c (38147B)
/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "trace.h"

/* For crc32 */
#include <zlib.h>

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON         (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
#define PHY_INT_FAULT            (1 << 5)
#define PHY_INT_DOWN             (1 << 4)
#define PHY_INT_AUTONEG_LP       (1 << 3)
#define PHY_INT_PARFAULT         (1 << 2)
#define PHY_INT_AUTONEG_PAGE     (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
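    /*
     * 0x0024 covers BMSR bit 2 (Link Status) and bit 5 (Auto-Negotiation
     * Complete), per the IEEE 802.3 MII register layout.
     */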
    if (qemu_get_queue(s->nic)->link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    imx_phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}

static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        trace_imx_phy_read_num(phy, s->phy_num);
        return 0xffff;
    }

    reg %= 32;

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 2:     /* ID1 */
        val = 0x0007;
        break;
    case 3:     /* ID2 */
        val = 0xc0d1;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-neg Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}

static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        trace_imx_phy_write_num(phy, s->phy_num);
        return;
    }

    reg %= 32;

    trace_imx_phy_write(val, phy, reg);

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            imx_phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
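            /*
             * BMCR bit 12 (0x1000) enables auto-negotiation; report it as
             * already complete by setting BMSR bit 5 (0x0020).
             */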
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        imx_phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
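    /*
     * irq[1] is presumably the 1588-timer line, since it also reacts to
     * ENET_INT_TS_TIMER; irq[0] carries MAC events only. Both lines are
     * raised for MAC interrupts, as explained in the note above.
     */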
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            int csum = 0;

            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
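    /* Registers are 32 bits wide and 32-bit aligned, so the byte offset
     * maps to a register index by dividing by 4. */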
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

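    /* Registers handled here are common to the FEC and ENET variants;
     * anything else falls through to the per-variant handlers below. */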
    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
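        /*
         * IAUR/IALR and GAUR/GALR are the upper/lower words of the 64-bit
         * individual and group (multicast) address hash tables; writes are
         * ignored here, so hash filtering never rejects anything.
         */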
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return !!s->regs[ENET_RDAR];
}

static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
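        /* ENET_BD_W is the wrap bit: it sends the ring back to its base at
         * ENET_RDSR. */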
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};


static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)