 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL,  DEBUG_IO,     DEBUG_MMIO,  DEBUG_INTERRUPT,
    DEBUG_RX,       DEBUG_TX,     DEBUG_MDIC,  DEBUG_EEPROM,
    DEBUG_UNKNOWN,  DEBUG_TXSUM,  DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif
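/*
 * Debug output is compiled in only when E1000_DEBUG is defined; which
 * categories actually print is chosen at build time by debugflags above.
 * For example, initializing it to DBGBIT(TXERR) | DBGBIT(GENERAL) | DBGBIT(RX)
 * would additionally let every DBGOUT(RX, ...) in the receive path reach
 * stderr.
 */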
#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };
/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ?         0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */        0xc20
};
typedef struct E1000State_st {
    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_min_shift;

    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        unsigned char vlan[4];
        unsigned char data[0x10000];
        unsigned char sum_needed;
        unsigned char vlan_needed;
        char cptse;         // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    // shifted in from guest driver
    } eecd_state;
} E1000State;
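/*
 * tx.data holds the frame being assembled (large enough for a full TSO
 * payload), tx.header caches the protocol headers that xmit_seg() re-prepends
 * for every emitted segment, and tx.vlan/tx.vlan_header stage 802.1Q tag
 * insertion; eecd_state.val_in accumulates the bits the guest shifts in over
 * the EECD interface.
 */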
#define defreg(x)    x = (E1000_##x>>2)
enum {
    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
    defreg(TORH),    defreg(TORL),    defreg(TOTH),    defreg(TOTL),
    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),      defreg(MTA),     defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};
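/*
 * defreg(RCTL), for example, expands to RCTL = (E1000_RCTL >> 2): each enum
 * value is the register's byte offset divided by four, so mac_reg[RCTL] is
 * the 32-bit register at that offset in the device's register window.
 */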
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};
ioport_map(PCIDevice *pci_dev, int region_num, uint32_t addr,
           uint32_t size, int type)
    DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
update_irqs(E1000State *s)
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);

set_interrupt_cause(E1000State *s, int index, uint32_t val)
    if (val)
        val |= E1000_ICR_INT_ASSERTED;
    s->mac_reg[ICR] = val;
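/*
 * Interrupt model: set_interrupt_cause() latches the pending causes into
 * ICR, and update_irqs() drives the PCI interrupt line from the logical AND
 * of ICR with the IMS mask, so a cause only raises the line while its mask
 * bit is set.
 */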
set_ics(E1000State *s, int index, uint32_t val)
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
rxbufsize(uint32_t v)
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
    case E1000_RCTL_SZ_1024:
    case E1000_RCTL_SZ_512:
    case E1000_RCTL_SZ_256:
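/*
 * Each case above selects the corresponding receive buffer size in bytes
 * (256 ... 16384, the BSEX cases covering the extended sizes); anything
 * outside the table is presumably treated as the default 2048-byte buffer.
 * set_rx_control() stores the result in s->rxbuf_size.
 */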
set_ctrl(E1000State *s, int index, uint32_t val)
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
set_rx_control(E1000State *s, int index, uint32_t val)
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
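/*
 * RCTL.RDMTS (the field scaled by E1000_RCTL_RDMTS_QUAT) selects the receive
 * descriptor minimum threshold; rxbuf_min_shift is that field plus one, and
 * e1000_receive() raises the RXDMT0 cause once the free descriptor space
 * drops to RDLEN >> rxbuf_min_shift.
 */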
set_mdic(E1000State *s, int index, uint32_t val)
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            s->phy_reg[addr] = data;
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;
    set_ics(s, 0, E1000_ICR_MDAC);
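/*
 * MDIC is the MDIO access register: the guest encodes PHY number, register
 * address, opcode and data in a single write; the emulation completes the
 * transaction immediately, flags unsupported registers with E1000_MDIC_ERROR
 * (per phy_regcap above), sets E1000_MDIC_READY and signals completion
 * through the MDAC interrupt cause.
 */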
get_eecd(E1000State *s, int index)
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
set_eecd(E1000State *s, int index, uint32_t val)
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_SK & (val ^ oldval)))      // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {               // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    if (!(val & E1000_EECD_CS)) {               // rising, no CS (EEPROM reset)
        memset(&s->eecd_state, 0, sizeof s->eecd_state);
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
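/*
 * The EECD handshake bit-bangs a Microwire EEPROM: on each rising SK edge
 * with CS held, one DI bit is shifted into val_in.  After nine bits the
 * opcode (bits 8..6) and word address (bits 5..0) are complete; a read
 * opcode switches to "reading" mode, and bitnum_out then indexes the 16-bit
 * words of eeprom_data one bit at a time for get_eecd() to clock out on DO.
 */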
flash_eerd_read(E1000State *s, int x)
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return (s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r;
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
    sum = net_checksum_add(n-css, data+css);
    cpu_to_be16wu((uint16_t *)(data + sloc),
                  net_checksum_finish(sum));
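/*
 * putsum() computes the 16-bit Internet checksum over data[css..n) and
 * stores the result big-endian at offset sloc; xmit_seg() uses it for both
 * the IP-header (IXSM) and TCP/UDP (TXSM) checksum-offload options.
 */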
vlan_enabled(E1000State *s)
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);

vlan_rx_filter_enabled(E1000State *s)
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);

is_vlan_packet(E1000State *s, const uint8_t *buf)
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));

is_vlan_txd(uint32_t txd_lower)
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
xmit_seg(E1000State *s)
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {           // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else                  // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),     // seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;                   // PSH, FIN
        } else                  // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
        }
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 12);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
    } else
        qemu_send_packet(s->vc, tp->data, tp->size);
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
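/*
 * xmit_seg() emits one frame of a TSO burst: it rewrites the IP total
 * length/identification and the TCP sequence number for the current segment,
 * clears PSH and FIN on every segment but the last, applies the requested
 * checksum offloads via putsum(), prepends the stashed VLAN tag when needed,
 * and hands the frame to qemu_send_packet().  For instance, with mss 1460
 * and paylen 4000 the guest's single request would leave the device as three
 * frames carrying 1460, 1460 and 1080 payload bytes.
 */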
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {          // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        if (tp->tucso == 0) {   // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;
            cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
                memmove(tp->data, tp->header, hdr);
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation Error\n");
    } else {
        cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))

    if (!(tp->tse && tp->cptse && tp->size < hdr))
txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
                              (void *)&dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
start_xmit(E1000State *s)
    target_phys_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
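/*
 * start_xmit() drains the transmit ring: descriptors are fetched from guest
 * memory between TDH and TDT, fed to process_tx_desc(), written back with DD
 * status when requested, and TDH advances (wrapping at TDLEN).  The causes
 * accumulated from the write-backs are then raised as a single interrupt via
 * set_ics().
 */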
receive_filter(E1000State *s, const uint8_t *buf, int size)
    static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
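/*
 * receive_filter() accepts a frame if the promiscuous/broadcast rules allow
 * it, if the destination matches an address in the RA array, or if the
 * inexact multicast filter hits: 12 bits taken from the last two bytes of
 * the destination MAC (shifted by mta_shift[RCTL.MO]) index the MTA bit
 * array, with bit (f & 0x1f) of MTA[f >> 5] deciding acceptance.
 */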
e1000_set_link_status(VLANClientState *vc)
    E1000State *s = vc->opaque;
    uint32_t old_status = s->mac_reg[STATUS];

    if (vc->link_down)
        s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    else
        s->mac_reg[STATUS] |= E1000_STATUS_LU;

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
e1000_can_receive(VLANClientState *vc)
    E1000State *s = vc->opaque;

    return (s->mac_reg[RCTL] & E1000_RCTL_EN);
e1000_receive(VLANClientState *vc, const uint8_t *buf, size_t size)
    E1000State *s = vc->opaque;
    struct e1000_rx_desc desc;
    target_phys_addr_t base;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))

    if (size > s->rxbuf_size) {
        DBGOUT(RX, "packet too large for buffers (%lu > %d)\n",
               (unsigned long)size, s->rxbuf_size);

    if (!receive_filter(s, buf, size))

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((void *)(buf + 4), buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
    }

    rdh_start = s->mac_reg[RDH];
    size += 4; // for the header
    do {
        if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
            set_ics(s, 0, E1000_ICS_RXO);
        }
        base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
               sizeof(desc) * s->mac_reg[RDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
                                      (void *)(buf + vlan_offset), size);
            desc.length = cpu_to_le16(size);
            desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
        } else // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
        }
    } while (desc.buffer_addr == 0);

    n = s->mac_reg[TORL];
    if ((s->mac_reg[TORL] += size) < n)

    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;
mac_readreg(E1000State *s, int index)
    return s->mac_reg[index];

mac_icr_read(E1000State *s, int index)
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);

mac_read_clr4(E1000State *s, int index)
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;

mac_read_clr8(E1000State *s, int index)
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;

mac_writereg(E1000State *s, int index, uint32_t val)
    s->mac_reg[index] = val;

set_rdt(E1000State *s, int index, uint32_t val)
    s->mac_reg[index] = val & 0xffff;

set_16bit(E1000State *s, int index, uint32_t val)
    s->mac_reg[index] = val & 0xffff;

set_dlen(E1000State *s, int index, uint32_t val)
    s->mac_reg[index] = val & 0xfff80;

set_tctl(E1000State *s, int index, uint32_t val)
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;

set_icr(E1000State *s, int index, uint32_t val)
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);

set_imc(E1000State *s, int index, uint32_t val)
    s->mac_reg[IMS] &= ~val;

set_ims(E1000State *s, int index, uint32_t val)
    s->mac_reg[IMS] |= val;
#define getreg(x)    [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),     getreg(RCTL),    getreg(TDH),     getreg(TXDCTL),
    getreg(WUFC),    getreg(TDT),     getreg(CTRL),    getreg(LEDCTL),
    getreg(MANC),    getreg(MDIC),    getreg(SWSM),    getreg(STATUS),
    getreg(TORL),    getreg(TOTL),    getreg(IMS),     getreg(TCTL),
    getreg(RDH),     getreg(RDT),     getreg(VET),

    [TOTH] = mac_read_clr8,  [TORH] = mac_read_clr8,  [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,  [TPR] = mac_read_clr4,   [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,    [EECD] = get_eecd,       [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
#define putreg(x)    [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),     putreg(EERD),    putreg(SWSM),    putreg(WUFC),
    putreg(TDBAL),   putreg(TDBAH),   putreg(TXDCTL),  putreg(RDBAH),
    putreg(RDBAL),   putreg(LEDCTL),  putreg(VET),
    [TDLEN] = set_dlen,  [RDLEN] = set_dlen,       [TCTL] = set_tctl,
    [TDT] = set_tctl,    [MDIC] = set_mdic,        [ICS] = set_ics,
    [TDH] = set_16bit,   [RDH] = set_16bit,        [RDT] = set_rdt,
    [IMC] = set_imc,     [IMS] = set_ims,          [ICR] = set_icr,
    [EECD] = set_eecd,   [RCTL] = set_rx_control,  [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
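/*
 * MMIO dispatch: e1000_mmio_readl/writel turn the register offset into an
 * index ((addr & 0x1ffff) >> 2) into macreg_readops/macreg_writeops; a NULL
 * slot falls through to the "unknown" DBGOUT paths.  The RA, MTA and VFTA
 * blocks are filled with the plain accessors using designated-initializer
 * ranges.
 */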
e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

#ifdef TARGET_WORDS_BIGENDIAN
    val = bswap32(val);
#endif
    if (index < NWRITEOPS && macreg_writeops[index])
        macreg_writeops[index](s, index, val);
    else if (index < NREADOPS && macreg_readops[index])
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
    else
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
               index<<2, val);
e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xffff) << (8*(addr & 3)));

e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xff) << (8*(addr & 3)));
e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        uint32_t val = macreg_readops[index](s, index);
#ifdef TARGET_WORDS_BIGENDIAN
        val = bswap32(val);
#endif
        return val;
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);

e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xff;

e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xffff;
static const int mac_regtosave[] = {
    CTRL,   EECD,   EERD,   GPRC,   GPTC,   ICR,    ICS,    IMC,    IMS,
    LEDCTL, MANC,   MDIC,   MPC,    PBA,    RCTL,   RDBAH,  RDBAL,  RDH,
    RDLEN,  RDT,    STATUS, SWSM,   TCTL,   TDBAH,  TDBAL,  TDH,    TDLEN,
    TDT,    TORH,   TORL,   TOTH,   TOTL,   TPR,    TPT,    TXDCTL, WUFC,
};
enum { MAC_NSAVE = ARRAY_SIZE(mac_regtosave) };

static const struct {
    int size;
    int array0;
} mac_regarraystosave[] = { {32, RA}, {128, MTA}, {128, VFTA} };
enum { MAC_NARRAYS = ARRAY_SIZE(mac_regarraystosave) };
nic_save(QEMUFile *f, void *opaque)
    E1000State *s = (E1000State *)opaque;

    pci_device_save(&s->dev, f);
    qemu_put_be32s(f, &s->rxbuf_size);
    qemu_put_be32s(f, &s->rxbuf_min_shift);
    qemu_put_be32s(f, &s->eecd_state.val_in);
    qemu_put_be16s(f, &s->eecd_state.bitnum_in);
    qemu_put_be16s(f, &s->eecd_state.bitnum_out);
    qemu_put_be16s(f, &s->eecd_state.reading);
    qemu_put_be32s(f, &s->eecd_state.old_eecd);
    qemu_put_8s(f, &s->tx.ipcss);
    qemu_put_8s(f, &s->tx.ipcso);
    qemu_put_be16s(f, &s->tx.ipcse);
    qemu_put_8s(f, &s->tx.tucss);
    qemu_put_8s(f, &s->tx.tucso);
    qemu_put_be16s(f, &s->tx.tucse);
    qemu_put_be32s(f, &s->tx.paylen);
    qemu_put_8s(f, &s->tx.hdr_len);
    qemu_put_be16s(f, &s->tx.mss);
    qemu_put_be16s(f, &s->tx.size);
    qemu_put_be16s(f, &s->tx.tso_frames);
    qemu_put_8s(f, &s->tx.sum_needed);
    qemu_put_s8s(f, &s->tx.ip);
    qemu_put_s8s(f, &s->tx.tcp);
    qemu_put_buffer(f, s->tx.header, sizeof s->tx.header);
    qemu_put_buffer(f, s->tx.data, sizeof s->tx.data);
    for (i = 0; i < 64; i++)
        qemu_put_be16s(f, s->eeprom_data + i);
    for (i = 0; i < 0x20; i++)
        qemu_put_be16s(f, s->phy_reg + i);
    for (i = 0; i < MAC_NSAVE; i++)
        qemu_put_be32s(f, s->mac_reg + mac_regtosave[i]);
    for (i = 0; i < MAC_NARRAYS; i++)
        for (j = 0; j < mac_regarraystosave[i].size; j++)
            qemu_put_be32s(f,
                           s->mac_reg + mac_regarraystosave[i].array0 + j);
nic_load(QEMUFile *f, void *opaque, int version_id)
    E1000State *s = (E1000State *)opaque;

    if ((ret = pci_device_load(&s->dev, f)) < 0)
        return ret;
    qemu_get_sbe32s(f, &i);     /* once some unused instance id */
    qemu_get_be32(f);           /* Ignored.  Was mmio_base. */
    qemu_get_be32s(f, &s->rxbuf_size);
    qemu_get_be32s(f, &s->rxbuf_min_shift);
    qemu_get_be32s(f, &s->eecd_state.val_in);
    qemu_get_be16s(f, &s->eecd_state.bitnum_in);
    qemu_get_be16s(f, &s->eecd_state.bitnum_out);
    qemu_get_be16s(f, &s->eecd_state.reading);
    qemu_get_be32s(f, &s->eecd_state.old_eecd);
    qemu_get_8s(f, &s->tx.ipcss);
    qemu_get_8s(f, &s->tx.ipcso);
    qemu_get_be16s(f, &s->tx.ipcse);
    qemu_get_8s(f, &s->tx.tucss);
    qemu_get_8s(f, &s->tx.tucso);
    qemu_get_be16s(f, &s->tx.tucse);
    qemu_get_be32s(f, &s->tx.paylen);
    qemu_get_8s(f, &s->tx.hdr_len);
    qemu_get_be16s(f, &s->tx.mss);
    qemu_get_be16s(f, &s->tx.size);
    qemu_get_be16s(f, &s->tx.tso_frames);
    qemu_get_8s(f, &s->tx.sum_needed);
    qemu_get_s8s(f, &s->tx.ip);
    qemu_get_s8s(f, &s->tx.tcp);
    qemu_get_buffer(f, s->tx.header, sizeof s->tx.header);
    qemu_get_buffer(f, s->tx.data, sizeof s->tx.data);
    for (i = 0; i < 64; i++)
        qemu_get_be16s(f, s->eeprom_data + i);
    for (i = 0; i < 0x20; i++)
        qemu_get_be16s(f, s->phy_reg + i);
    for (i = 0; i < MAC_NSAVE; i++)
        qemu_get_be32s(f, s->mac_reg + mac_regtosave[i]);
    for (i = 0; i < MAC_NARRAYS; i++)
        for (j = 0; j < mac_regarraystosave[i].size; j++)
            qemu_get_be32s(f,
                           s->mac_reg + mac_regarraystosave[i].array0 + j);
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};
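/*
 * Words 0-2 of this template are overwritten with the NIC's MAC address at
 * init time, and pci_e1000_init() then stores EEPROM_SUM minus the sum of
 * the preceding words into EEPROM_CHECKSUM_REG so the image checksums to
 * the value the guest driver expects.
 */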
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,            [PHY_STATUS] = 0x796d, // link initially up
    [PHY_ID1] = 0x141,              [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,      [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,       [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};
static const uint32_t mac_reg_init[] = {
    [CTRL] =   E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
               E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC] =   E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
               E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
               E1000_MANC_RMCP_EN,
};
static CPUWriteMemoryFunc *e1000_mmio_write[] = {
    e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
};

static CPUReadMemoryFunc *e1000_mmio_read[] = {
    e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
};
e1000_mmio_map(PCIDevice *pci_dev, int region_num,
               uint32_t addr, uint32_t size, int type)
    E1000State *d = (E1000State *)pci_dev;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);

    cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
    qemu_register_coalesced_mmio(addr, excluded_regs[0]);

    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
                                     excluded_regs[i + 1] -
                                     excluded_regs[i] - 4);
e1000_cleanup(VLANClientState *vc)
    E1000State *d = vc->opaque;

    unregister_savevm("e1000", d);

pci_e1000_uninit(PCIDevice *dev)
    E1000State *d = (E1000State *) dev;

    cpu_unregister_io_memory(d->mmio_index);
static void e1000_reset(void *opaque)
    E1000State *d = opaque;

    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);
static void pci_e1000_init(PCIDevice *pci_dev)
    E1000State *d = (E1000State *)pci_dev;
    uint16_t checksum = 0;
    static const char info_str[] = "e1000";

    pci_conf = d->dev.config;

    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
    pci_config_set_device_id(pci_conf, E1000_DEVID);
    *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
    *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
    pci_conf[0x08] = 0x03;
    pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
    pci_conf[0x0c] = 0x10;

    pci_conf[0x3d] = 1; // interrupt pin 0

    d->mmio_index = cpu_register_io_memory(0, e1000_mmio_read,
            e1000_mmio_write, d);

    pci_register_io_region((PCIDevice *)d, 0, PNPMMIO_SIZE,
                           PCI_ADDRESS_SPACE_MEM, e1000_mmio_map);

    pci_register_io_region((PCIDevice *)d, 1, IOPORT_SIZE,
                           PCI_ADDRESS_SPACE_IO, ioport_map);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qdev_get_macaddr(&d->dev.qdev, macaddr);
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->vc = qdev_get_vlan_client(&d->dev.qdev,
                                 e1000_can_receive, e1000_receive,
                                 NULL, e1000_cleanup, d);
    d->vc->link_status_changed = e1000_set_link_status;

    qemu_format_nic_info_str(d->vc, macaddr);

    register_savevm(info_str, -1, 2, nic_save, nic_load, d);
    d->dev.unregister = pci_e1000_uninit;
    qemu_register_reset(e1000_reset, 0, d);
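/*
 * BAR layout: region 0 is the PNPMMIO_SIZE (128 KB) register window backed
 * by the e1000_mmio_* handlers, region 1 the IOPORT_SIZE (64-byte) I/O
 * space, for which this model only logs the mapping (see ioport_map).
 * Frequently polled registers are excluded from MMIO coalescing in
 * e1000_mmio_map().
 */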
static void e1000_register_devices(void)
    pci_qdev_register("e1000", sizeof(E1000State), pci_e1000_init);

device_init(e1000_register_devices)