/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/module.h"
#include "net/checksum.h"
#define DEBUG_IMX_FEC 0

#define FEC_PRINTF(fmt, args...) \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \

#define DEBUG_IMX_PHY 0

#define PHY_PRINTF(fmt, args...) \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
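/*
 * Upper bound on the number of buffer descriptors walked per transmit
 * kick, so that imx_fec_do_tx()/imx_enet_do_tx() cannot spin forever on a
 * malformed (e.g. circular, always-ready) guest descriptor ring.
 */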
#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
    sprintf(tmp, "index %d", index);

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
    case ENET_MIIGSK_CFGR:
        return imx_default_reg_name(s, index);

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
        return imx_default_reg_name(s, index);

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
            return imx_fec_reg_name(s, index);

    return imx_enet_reg_name(s, index);
/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
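/*
 * Subsection carrying TX descriptors 1 and 2. QEMU migrates a subsection
 * only when its .needed callback returns true, so single-ring instances
 * keep the original migration stream layout.
 */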
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,

#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);
/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)

static void phy_update_link(IMXFECState *s)
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;

static void imx_eth_set_link(NetClientState *nc)
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
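/*
 * Put the emulated PHY back into its reset state: an autonegotiating
 * 10/100 PHY. The magic values below follow the standard MII register
 * layout (basic control, basic status, autonegotiation advertisement).
 */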
static void phy_reset(IMXFECState *s)
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;

static uint32_t do_phy_read(IMXFECState *s, int reg)
        /* we only advertise one phy */
    case 0: /* Basic Control */
        val = s->phy_control;
    case 1: /* Basic Status */
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
    case 5: /* Auto-neg Link Partner Ability */
    case 6: /* Auto-neg Expansion */
    case 29: /* Interrupt source. */
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

        /* we only advertise one phy */
    case 0: /* Basic Control */
        s->phy_control = val & 0x7980;
        /* Complete autonegotiation immediately. */
            s->phy_status |= 0x0020;
    case 4: /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
    case 30: /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);

static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
static void imx_eth_update(IMXFECState *s)
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
        qemu_set_irq(s->irq[1], 0);

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
        qemu_set_irq(s->irq[0], 0);
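/*
 * FEC transmit path: gather the chained buffer descriptors into s->frame
 * until one with ENET_BD_L (last) is seen, hand the frame to the network
 * backend, clear ENET_BD_R so the guest sees the buffer as consumed, and
 * wrap back to ENET_TDSR when ENET_BD_W is set.
 */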
static void imx_fec_do_tx(IMXFECState *s)
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];

    s->tx_descriptor[0] = addr;
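/*
 * ENET (1588-capable) flavour of the transmit walk. On top of the FEC
 * behaviour it honours the per-descriptor checksum-offload options
 * (ENET_BD_PINS for protocol checksums, ENET_BD_IINS for the IPv4 header
 * checksum) and supports up to three TX rings, selected by which TDAR
 * register was written.
 */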
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;

        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];

    s->tx_descriptor[ring] = addr;

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
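/*
 * Recompute ENET_RDAR from the "empty" bit of the descriptor currently at
 * s->rx_descriptor. When receive becomes possible again and "flush" is
 * set, packets queued while the ring was full are re-delivered.
 */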
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        FEC_PRINTF("RX buffer full\n");
        qemu_flush_queued_packets(qemu_get_queue(s->nic));

static void imx_eth_reset(DeviceState *d)
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)

    s->regs[ENET_FRBR] = 0x00000600;
    s->regs[ENET_FRSR] = 0x00000500;
    s->regs[ENET_MIIGSK_ENR] = 0x00000006;

    s->regs[ENET_RAEM] = 0x00000004;
    s->regs[ENET_RAFL] = 0x00000004;
    s->regs[ENET_TAEM] = 0x00000004;
    s->regs[ENET_TAFL] = 0x00000008;
    s->regs[ENET_TIPG] = 0x0000000c;
    s->regs[ENET_FTRL] = 0x000007ff;
    s->regs[ENET_ATPER] = 0x3b9aca00;

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
        return imx_default_read(s, index);

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
        return s->regs[index];
        return imx_default_read(s, index);

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

        value = s->regs[index];
            value = imx_fec_read(s, index);
            value = imx_enet_read(s, index);

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        imx_default_write(s, index, value);
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
        s->regs[index] = value & 0x000001ff;
        s->regs[index] = value & 0x0000001f;
        s->regs[index] = value & 0x00003fff;
        s->regs[index] = value & 0x00000019;
        s->regs[index] = value & 0x000000C7;
        s->regs[index] = value & 0x00002a9d;
        s->regs[index] = value;
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        s->regs[index] = value & 0x7fffffff;
        s->regs[index] = value & 0x00007f7f;
        /* implement clear timer flag */
        value = value & 0x0000000f;
        value = value & 0x000000fd;
        s->regs[index] = value;
        imx_default_write(s, index, value);
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),

        s->regs[index] &= ~value;
        s->regs[index] = value;
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
    case ENET_TDAR1: /* FALLTHROUGH */
    case ENET_TDAR2: /* FALLTHROUGH */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
    case ENET_TDAR: /* FALLTHROUGH */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
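        /*
         * MMFR write: bit 29 of the value selects an MII read (as opposed
         * to a write), the PHY/register address bits are handed to
         * do_phy_read()/do_phy_write() unchanged, and the low 16 bits
         * carry the data.
         */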
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
            /* This is a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        s->regs[index] = value & 0xfe;
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        s->regs[ENET_EIR] |= ENET_INT_GRA;
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        /* TODO: implement MAC hash filtering. */
        s->regs[index] = value & 0x3;
        s->regs[index] = value & 0x13f;
            s->regs[index] = value & ~3;
            s->regs[index] = value & ~7;
        s->rx_descriptor = s->regs[index];
            s->regs[index] = value & ~3;
            s->regs[index] = value & ~7;
        s->tx_descriptor[0] = s->regs[index];
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        s->regs[index] = value & 0x00003ff0;
            imx_fec_write(s, index, value);
            imx_enet_write(s, index, value);

static int imx_eth_can_receive(NetClientState *nc)
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return !!s->regs[ENET_RDAR];
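/*
 * FEC receive path: copy the incoming frame into guest memory through the
 * RX descriptor ring. The CRC appended after the payload is computed here
 * (the net layer hands us the frame without its FCS), and oversized frames
 * get the truncation/length error flags set in the closing descriptor.
 */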
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    unsigned int buf_len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);

    /* 4 bytes for the CRC. */
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;

    addr = s->rx_descriptor;
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
            buf_len += size - 4;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
            crc_ptr += 4 - size;
        bd.flags &= ~ENET_BD_E;
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];

    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    unsigned int buf_len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);

    /* 4 bytes for the CRC. */
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;

    addr = s->rx_descriptor;
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
            buf_len += size - 4;
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
            crc_ptr += 4 - size;
        bd.flags &= ~ENET_BD_E;
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];

    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    return imx_fec_receive(nc, buf, len);
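/*
 * All FEC/ENET registers are 32 bits wide; limiting valid access sizes to
 * 4 bytes lets the memory core reject byte and halfword accesses.
 */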
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,

static void imx_eth_cleanup(NetClientState *nc)
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,

static void imx_eth_realize(DeviceState *dev, Error **errp)
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          DEVICE(dev)->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
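/*
 * "tx-ring-num" selects how many TX descriptor rings are modelled.  Values
 * above 1 switch on the TDSR1/TDSR2 and TDAR1/TDAR2 handling and the extra
 * vmstate subsection (see imx_eth_is_multi_tx_ring).
 */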
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),

static void imx_eth_class_init(ObjectClass *klass, void *data)
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    dc->props = imx_eth_properties;
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";

static void imx_fec_init(Object *obj)
    IMXFECState *s = IMX_FEC(obj);

static void imx_enet_init(Object *obj)
    IMXFECState *s = IMX_FEC(obj);

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,

static void imx_eth_register_types(void)
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);

type_init(imx_eth_register_types)