/* 3Com 3C90xB/C EtherLink driver, by D.C. van Moolenbroek */

#include <minix/drivers.h>
#include <minix/netdriver.h>

#include <machine/pci.h>
#include <sys/mman.h>
#include <assert.h>

#include "3c90x.h"

#define VERBOSE         0       /* verbose debugging output */

#if VERBOSE
#define XLBC_DEBUG(x)   printf x
#else
#define XLBC_DEBUG(x)
#endif

static struct {
        int hook_id;            /* IRQ hook ID */
        uint8_t *base;          /* base address of memory-mapped registers */
        uint32_t size;          /* size of memory-mapped register area */
        uint16_t window;        /* currently active register window */
        uint16_t filter;        /* packet receipt filter flags */

        xlbc_pd_t *dpd_base;    /* TX descriptor array, virtual address */
        phys_bytes dpd_phys;    /* TX descriptor array, physical address */
        uint8_t *txb_base;      /* transmission buffer, virtual address */
        phys_bytes txb_phys;    /* transmission buffer, physical address */
        xlbc_pd_t *upd_base;    /* RX descriptor array, virtual address */
        phys_bytes upd_phys;    /* RX descriptor array, physical address */
        uint8_t *rxb_base;      /* receipt buffers, virtual address */
        phys_bytes rxb_phys;    /* receipt buffers, physical address */

        unsigned int dpd_tail;  /* index of tail TX descriptor */
        unsigned int dpd_used;  /* number of in-use TX descriptors */
        size_t txb_tail;        /* index of tail TX byte in buffer */
        size_t txb_used;        /* number of in-use TX buffer bytes */
        unsigned int upd_head;  /* index of head RX descriptor */
} state;

enum xlbc_link_type {
        XLBC_LINK_DOWN,
        XLBC_LINK_UP,
        XLBC_LINK_UP_T_HD,
        XLBC_LINK_UP_T_FD,
        XLBC_LINK_UP_TX_HD,
        XLBC_LINK_UP_TX_FD
};
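
/*
 * Accessors for the card's memory-mapped registers.  Note that most of the
 * registers are windowed: they are accessible only while the corresponding
 * register window is selected (see xlbc_select_window() below).
 */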
#define XLBC_READ_8(off)        (*(volatile uint8_t *)(state.base + (off)))
#define XLBC_READ_16(off)       (*(volatile uint16_t *)(state.base + (off)))
#define XLBC_READ_32(off)       (*(volatile uint32_t *)(state.base + (off)))
#define XLBC_WRITE_8(off, val) \
        (*(volatile uint8_t *)(state.base + (off)) = (val))
#define XLBC_WRITE_16(off, val) \
        (*(volatile uint16_t *)(state.base + (off)) = (val))
#define XLBC_WRITE_32(off, val) \
        (*(volatile uint32_t *)(state.base + (off)) = (val))

static int xlbc_init(unsigned int, netdriver_addr_t *, uint32_t *,
    unsigned int *);
static void xlbc_stop(void);
static void xlbc_set_mode(unsigned int, const netdriver_addr_t *,
    unsigned int);
static ssize_t xlbc_recv(struct netdriver_data *, size_t);
static int xlbc_send(struct netdriver_data *, size_t);
static void xlbc_intr(unsigned int);
static void xlbc_tick(void);

static const struct netdriver xlbc_table = {
        .ndr_name = "xl",
        .ndr_init = xlbc_init,
        .ndr_stop = xlbc_stop,
        .ndr_set_mode = xlbc_set_mode,
        .ndr_recv = xlbc_recv,
        .ndr_send = xlbc_send,
        .ndr_intr = xlbc_intr,
        .ndr_tick = xlbc_tick
};

/*
 * Find a matching PCI device.
 */
static int
xlbc_probe(unsigned int skip)
{
        uint16_t vid, did;
        int devind;
#if VERBOSE
        const char *dname;
#endif

        pci_init();

        if (pci_first_dev(&devind, &vid, &did) <= 0)
                return -1;

        while (skip--) {
                if (pci_next_dev(&devind, &vid, &did) <= 0)
                        return -1;
        }

#if VERBOSE
        dname = pci_dev_name(vid, did);
        XLBC_DEBUG(("%s: found %s (%04x:%04x) at %s\n", netdriver_name(),
            dname ? dname : "<unknown>", vid, did, pci_slot_name(devind)));
#endif

        pci_reserve(devind);

        return devind;
}

/*
 * Issue a command to the command register.
 */
static void
xlbc_issue_cmd(uint16_t cmd)
{

        assert(!(XLBC_READ_16(XLBC_STATUS_REG) & XLBC_STATUS_IN_PROGRESS));

        XLBC_WRITE_16(XLBC_CMD_REG, cmd);
}

/*
 * Wait for a command to be acknowledged.  Return TRUE iff the command
 * completed within the timeout period.
 */
static int
xlbc_wait_cmd(void)
{
        spin_t spin;

        /*
         * The documentation implies that a timeout of 1ms is an upper bound
         * for all commands.
         */
        SPIN_FOR(&spin, XLBC_CMD_TIMEOUT) {
                if (!(XLBC_READ_16(XLBC_STATUS_REG) & XLBC_STATUS_IN_PROGRESS))
                        return TRUE;
        }

        return FALSE;
}

/*
 * Reset the device to its initial state.  Return TRUE iff successful.
 */
static int
xlbc_reset(void)
{

        (void)xlbc_wait_cmd();

        xlbc_issue_cmd(XLBC_CMD_GLOBAL_RESET);

        /*
         * It appears that the "command in progress" bit may be cleared before
         * the reset has completed, resulting in strange behavior afterwards.
         * Thus, we first wait for the maximum reset time (1ms) regardless,
         * and only then start checking the command-in-progress bit.
         */
        micro_delay(XLBC_RESET_DELAY);

        if (!xlbc_wait_cmd())
                return FALSE;

        state.window = 0;

        return TRUE;
}

/*
 * Select a register window.
 */
static void
xlbc_select_window(unsigned int window)
{

        if (state.window == window)
                return;

        xlbc_issue_cmd(XLBC_CMD_SELECT_WINDOW | window);

        state.window = window;
}

/*
 * Read a word from the EEPROM.  On failure, return a value with all bits set.
 */
static uint16_t
xlbc_read_eeprom(unsigned int word)
{
        spin_t spin;

        /* The B revision supports 64 EEPROM words only. */
        assert(!(word & ~XLBC_EEPROM_CMD_ADDR));

        xlbc_select_window(XLBC_EEPROM_WINDOW);

        assert(!(XLBC_READ_16(XLBC_EEPROM_CMD_REG) & XLBC_EEPROM_CMD_BUSY));

        XLBC_WRITE_16(XLBC_EEPROM_CMD_REG, XLBC_EEPROM_CMD_READ | word);

        /* The documented maximum delay for reads is 162us. */
        SPIN_FOR(&spin, XLBC_EEPROM_TIMEOUT) {
                if (!(XLBC_READ_16(XLBC_EEPROM_CMD_REG) &
                    XLBC_EEPROM_CMD_BUSY))
                        return XLBC_READ_16(XLBC_EEPROM_DATA_REG);
        }

        return (uint16_t)-1;
}

/*
 * Obtain the preconfigured hardware address of the device.
 */
static void
xlbc_get_hwaddr(netdriver_addr_t * addr)
{
        uint16_t word[3];

        /* TODO: allow overriding through environment variables */

        word[0] = xlbc_read_eeprom(XLBC_EEPROM_WORD_OEM_ADDR0);
        word[1] = xlbc_read_eeprom(XLBC_EEPROM_WORD_OEM_ADDR1);
        word[2] = xlbc_read_eeprom(XLBC_EEPROM_WORD_OEM_ADDR2);

        addr->na_addr[0] = word[0] >> 8;
        addr->na_addr[1] = word[0] & 0xff;
        addr->na_addr[2] = word[1] >> 8;
        addr->na_addr[3] = word[1] & 0xff;
        addr->na_addr[4] = word[2] >> 8;
        addr->na_addr[5] = word[2] & 0xff;

        XLBC_DEBUG(("%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
            netdriver_name(),
            addr->na_addr[0], addr->na_addr[1], addr->na_addr[2],
            addr->na_addr[3], addr->na_addr[4], addr->na_addr[5]));
}

/*
 * Configure the device to use the given hardware address.
 */
static void
xlbc_set_hwaddr(netdriver_addr_t * addr)
{

        xlbc_select_window(XLBC_STATION_WINDOW);

        /* Set station address. */
        XLBC_WRITE_16(XLBC_STATION_ADDR0_REG,
            addr->na_addr[0] | (addr->na_addr[1] << 8));
        XLBC_WRITE_16(XLBC_STATION_ADDR1_REG,
            addr->na_addr[2] | (addr->na_addr[3] << 8));
        XLBC_WRITE_16(XLBC_STATION_ADDR2_REG,
            addr->na_addr[4] | (addr->na_addr[5] << 8));

        /* Set station mask. */
        XLBC_WRITE_16(XLBC_STATION_MASK0_REG, 0);
        XLBC_WRITE_16(XLBC_STATION_MASK1_REG, 0);
        XLBC_WRITE_16(XLBC_STATION_MASK2_REG, 0);
}

/*
 * Perform one-time initialization of various settings.
 */
static void
xlbc_init_once(void)
{
        uint16_t word;
        uint32_t dword;

        /*
         * Verify the presence of a 10BASE-T or 100BASE-TX port.  Those are
         * the only port types that are supported and have been tested so far.
         */
        xlbc_select_window(XLBC_MEDIA_OPT_WINDOW);

        word = XLBC_READ_16(XLBC_MEDIA_OPT_REG);
        if (!(word & (XLBC_MEDIA_OPT_BASE_TX | XLBC_MEDIA_OPT_10_BT)))
                panic("no 100BASE-TX or 10BASE-T port on device");

        /* Initialize the device's internal configuration. */
        xlbc_select_window(XLBC_CONFIG_WINDOW);

        word = XLBC_READ_16(XLBC_CONFIG_WORD1_REG);
        word = (word & ~XLBC_CONFIG_XCVR_MASK) | XLBC_CONFIG_XCVR_AUTO;
        XLBC_WRITE_16(XLBC_CONFIG_WORD1_REG, word);

        /* Disable alternate upload and download sequences. */
        dword = XLBC_READ_32(XLBC_DMA_CTRL_REG);
        dword |= XLBC_DMA_CTRL_UP_NOALT | XLBC_DMA_CTRL_DN_NOALT;
        XLBC_WRITE_32(XLBC_DMA_CTRL_REG, dword);

        /* Specify in which status events we are interested. */
        xlbc_issue_cmd(XLBC_CMD_IND_ENABLE | XLBC_STATUS_MASK);

        /* Enable statistics, including support for counters' upper bits. */
        xlbc_select_window(XLBC_NET_DIAG_WINDOW);

        word = XLBC_READ_16(XLBC_NET_DIAG_REG);
        XLBC_WRITE_16(XLBC_NET_DIAG_REG, word | XLBC_NET_DIAG_UPPER);

        xlbc_issue_cmd(XLBC_CMD_STATS_ENABLE);
}

/*
 * Allocate memory for DMA.
 */
static void
xlbc_alloc_dma(void)
{

        /* Packet descriptors require 8-byte alignment. */
        assert(!(sizeof(xlbc_pd_t) % 8));

        /*
         * For packet transmission, we use one single circular buffer in which
         * we store packet data.  We do not split packets in two when the
         * buffer wraps; instead we waste the trailing bytes and move on to
         * the start of the buffer.  This allows us to use a single fragment
         * for each transmitted packet, thus keeping the descriptors small (16
         * bytes).  The descriptors themselves are allocated as a separate
         * array.  There is obviously room for improvement here, but the
         * approach should be good enough.
         */
        state.dpd_base = alloc_contig(XLBC_DPD_COUNT * sizeof(xlbc_pd_t),
            AC_ALIGN4K, &state.dpd_phys);
        state.txb_base = alloc_contig(XLBC_TXB_SIZE, 0, &state.txb_phys);

        if (state.dpd_base == NULL || state.txb_base == NULL)
                panic("unable to allocate memory for packet transmission");

        /*
         * For packet receipt, we have a number of pairs of buffers and
         * corresponding descriptors.  Each buffer is large enough to contain
         * an entire packet.  We avoid wasting memory by allocating the
         * buffers in one go, at the cost of requiring a large contiguous
         * area.  The descriptors are allocated as a separate array, thus
         * matching the scheme for transmission in terms of allocation
         * strategy.  Here, too, there is clear room for improvement at the
         * cost of extra complexity.
         */
        state.upd_base = alloc_contig(XLBC_UPD_COUNT * sizeof(xlbc_pd_t),
            AC_ALIGN4K, &state.upd_phys);
        state.rxb_base = alloc_contig(XLBC_UPD_COUNT * XLBC_MAX_PKT_LEN, 0,
            &state.rxb_phys);

        if (state.upd_base == NULL || state.rxb_base == NULL)
                panic("unable to allocate memory for packet receipt");
}

/*
 * Reset the transmitter.
 */
static void
xlbc_reset_tx(void)
{

        xlbc_issue_cmd(XLBC_CMD_TX_RESET);
        if (!xlbc_wait_cmd())
                panic("timeout trying to reset transmitter");

        state.dpd_tail = 0;
        state.dpd_used = 0;
        state.txb_tail = 0;
        state.txb_used = 0;

        xlbc_issue_cmd(XLBC_CMD_TX_ENABLE);
}

/*
 * Reset the receiver.
 */
static void
xlbc_reset_rx(void)
{
        unsigned int i;

        xlbc_issue_cmd(XLBC_CMD_RX_RESET);
        if (!xlbc_wait_cmd())
                panic("timeout trying to reset receiver");

        xlbc_issue_cmd(XLBC_CMD_SET_FILTER | state.filter);

        for (i = 0; i < XLBC_UPD_COUNT; i++) {
                state.upd_base[i].next = state.upd_phys +
                    ((i + 1) % XLBC_UPD_COUNT) * sizeof(xlbc_pd_t);
                state.upd_base[i].flags = 0;
                state.upd_base[i].addr = state.rxb_phys + i * XLBC_MAX_PKT_LEN;
                state.upd_base[i].len = XLBC_LEN_LAST | XLBC_MAX_PKT_LEN;
        }

        XLBC_WRITE_32(XLBC_UP_LIST_PTR_REG, state.upd_phys);

        state.upd_head = 0;
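
        /*
         * Make sure that the compiler does not reorder the descriptor
         * initialization above past the point where the receiver is enabled
         * below.
         */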
        __insn_barrier();

        xlbc_issue_cmd(XLBC_CMD_RX_ENABLE);
}

/*
 * Execute a MII read, write, or Z cycle.  Stop the clock, wait, start the
 * clock, optionally change direction and/or data bits, and wait again.
 */
static uint16_t
xlbc_mii_cycle(uint16_t val, uint16_t mask, uint16_t bits)
{

        val &= ~XLBC_PHYS_MGMT_CLK;
        XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

        /* All the delays should be 200ns minimum. */
        micro_delay(XLBC_MII_DELAY);

        /* The clock must be enabled separately from other bit updates. */
        val |= XLBC_PHYS_MGMT_CLK;
        XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

        if (mask != 0) {
                val = (val & ~mask) | bits;
                XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);
        }

        micro_delay(XLBC_MII_DELAY);

        return val;
}

/*
 * Read a MII register.
 */
static uint16_t
xlbc_mii_read(uint16_t phy, uint16_t reg)
{
        uint32_t dword;
        uint16_t val;
        int i;

        xlbc_select_window(XLBC_PHYS_MGMT_WINDOW);

        /* Set the direction to write. */
        val = XLBC_READ_16(XLBC_PHYS_MGMT_REG) | XLBC_PHYS_MGMT_DIR;

        XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

        /* Execute write cycles to submit the preamble: PR=1..1 (32 bits) */
        for (i = 0; i < 32; i++)
                val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
                    XLBC_PHYS_MGMT_DATA);

        /* Execute write cycles to submit the rest of the read frame. */
        /* ST=01 OP=10 PHYAD=aaaaa REGAD=rrrrr */
        dword = 0x1800 | (phy << 5) | reg;

        for (i = 13; i >= 0; i--)
                val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
                    ((dword >> i) & 1) ? XLBC_PHYS_MGMT_DATA : 0);

        /* Execute a Z cycle to set the direction to read. */
        val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DIR, 0);

        dword = 0;

        /* Receive one status bit and 16 actual data bits. */
        for (i = 16; i >= 0; i--) {
                (void)xlbc_mii_cycle(val, 0, 0);

                val = XLBC_READ_16(XLBC_PHYS_MGMT_REG);

                dword = (dword << 1) | !!(val & XLBC_PHYS_MGMT_DATA);

                micro_delay(XLBC_MII_DELAY);
        }

        /* Execute a Z cycle to terminate the read frame. */
        (void)xlbc_mii_cycle(val, 0, 0);

        /* If the status bit was set, the results are invalid. */
        if (dword & 0x10000)
                dword = 0xffff;

        return (uint16_t)dword;
}

/*
 * Write a MII register.
 */
static void
xlbc_mii_write(uint16_t phy, uint16_t reg, uint16_t data)
{
        uint32_t dword;
        uint16_t val;
        int i;

        xlbc_select_window(XLBC_PHYS_MGMT_WINDOW);

        /* Set the direction to write. */
        val = XLBC_READ_16(XLBC_PHYS_MGMT_REG) | XLBC_PHYS_MGMT_DIR;

        XLBC_WRITE_16(XLBC_PHYS_MGMT_REG, val);

        /* Execute write cycles to submit the preamble: PR=1..1 (32 bits) */
        for (i = 0; i < 32; i++)
                val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
                    XLBC_PHYS_MGMT_DATA);

        /* Execute write cycles to submit the rest of the write frame. */
        /* ST=01 OP=01 PHYAD=aaaaa REGAD=rrrrr TA=10 DATA=d..d (16 bits) */
        dword = 0x50020000 | (phy << 23) | (reg << 18) | data;

        for (i = 31; i >= 0; i--)
                val = xlbc_mii_cycle(val, XLBC_PHYS_MGMT_DATA,
                    ((dword >> i) & 1) ? XLBC_PHYS_MGMT_DATA : 0);

        /* Execute a Z cycle to terminate the write frame. */
        (void)xlbc_mii_cycle(val, 0, 0);
}

/*
 * Return a human-readable description for the given link type.
 */
#if VERBOSE
static const char *
xlbc_get_link_name(enum xlbc_link_type link_type)
{

        switch (link_type) {
        case XLBC_LINK_DOWN: return "down";
        case XLBC_LINK_UP: return "up";
        case XLBC_LINK_UP_T_HD: return "up (10Mbps, half duplex)";
        case XLBC_LINK_UP_T_FD: return "up (10Mbps, full duplex)";
        case XLBC_LINK_UP_TX_HD: return "up (100Mbps, half duplex)";
        case XLBC_LINK_UP_TX_FD: return "up (100Mbps, full duplex)";
        default: return "(unknown)";
        }
}
#endif /* VERBOSE */

/*
 * Determine the current link status, and return the resulting link type.
 */
static enum xlbc_link_type
xlbc_get_link_type(void)
{
        uint16_t status, control, mask;

        xlbc_select_window(XLBC_MEDIA_STS_WINDOW);

        if (!(XLBC_READ_16(XLBC_MEDIA_STS_REG) & XLBC_MEDIA_STS_LINK_DET))
                return XLBC_LINK_DOWN;

        status = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_STATUS);
        if (!(status & XLBC_MII_STATUS_EXTCAP))
                return XLBC_LINK_UP;
        if (!(status & XLBC_MII_STATUS_AUTONEG))
                return XLBC_LINK_UP;

        /* Wait for auto-negotiation to complete first. */
        if (!(status & XLBC_MII_STATUS_COMPLETE)) {
                control = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_CONTROL);
                control |= XLBC_MII_CONTROL_AUTONEG;
                xlbc_mii_write(XLBC_PHY_ADDR, XLBC_MII_CONTROL, control);

                SPIN_UNTIL(xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_STATUS) &
                    XLBC_MII_STATUS_COMPLETE, XLBC_AUTONEG_TIMEOUT);

                status = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_STATUS);
                if (!(status & XLBC_MII_STATUS_COMPLETE))
                        return XLBC_LINK_UP;
        }

        /* The highest bit set in both registers is the selected link type. */
        mask = xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_AUTONEG_ADV) &
            xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_LP_ABILITY);

        if (mask & XLBC_MII_LINK_TX_FD)
                return XLBC_LINK_UP_TX_FD;
        if (mask & XLBC_MII_LINK_TX_HD)
                return XLBC_LINK_UP_TX_HD;
        if (mask & XLBC_MII_LINK_T_FD)
                return XLBC_LINK_UP_T_FD;
        if (mask & XLBC_MII_LINK_T_HD)
                return XLBC_LINK_UP_T_HD;

        return XLBC_LINK_UP;
}

/*
 * Set the duplex mode to full or half, based on the current link type.
 */
static void
xlbc_set_duplex(enum xlbc_link_type link)
{
        uint16_t word;
        int duplex;

        /*
         * If the link is down, do not change modes.  In fact, the link may go
         * down as a result of the reset that is part of changing the mode.
         */
        if (link == XLBC_LINK_DOWN)
                return;

        /* See if the desired duplex mode differs from the current mode. */
        duplex = (link == XLBC_LINK_UP_T_FD || link == XLBC_LINK_UP_TX_FD);

        xlbc_select_window(XLBC_MAC_CTRL_WINDOW);

        word = XLBC_READ_16(XLBC_MAC_CTRL_REG);

        if (!!(word & XLBC_MAC_CTRL_ENA_FD) == duplex)
                return; /* already in the desired mode */

        /*
         * Change the duplex mode.  Unfortunately, that also means we need to
         * reset the RX and TX engines.  Fortunately, this should happen only
         * on a link change, so we're probably not doing much extra damage.
         * TODO: recovery for packets currently on the transmission queue.
         */
        XLBC_DEBUG(("%s: %s full-duplex mode\n", netdriver_name(),
            duplex ? "setting" : "clearing"));

        XLBC_WRITE_16(XLBC_MAC_CTRL_REG, word ^ XLBC_MAC_CTRL_ENA_FD);

        xlbc_reset_rx();

        xlbc_reset_tx();
}

/*
 * The link status has changed.
 */
static void
xlbc_link_event(void)
{
        enum xlbc_link_type link_type;

        /*
         * The 3c90xB is documented to require a read from the internal
         * auto-negotiation expansion MII register in order to clear the link
         * event interrupt.  The 3c90xC resets the link event interrupt as
         * part of automatic interrupt acknowledgment.
         */
        (void)xlbc_mii_read(XLBC_PHY_ADDR, XLBC_MII_AUTONEG_EXP);

        link_type = xlbc_get_link_type();

#if VERBOSE
        XLBC_DEBUG(("%s: link %s\n", netdriver_name(),
            xlbc_get_link_name(link_type)));
#endif

        xlbc_set_duplex(link_type);
}

/*
 * Initialize the device.
 */
static void
xlbc_init_hw(int devind, netdriver_addr_t * addr)
{
        uint32_t bar;
        uint16_t cr;
        int r, io, irq;

        /* Map in the device's memory-mapped registers. */
        if ((r = pci_get_bar(devind, PCI_BAR_2, &bar, &state.size, &io)) != OK)
                panic("unable to retrieve bar: %d", r);

        if (state.size < XLBC_MIN_REG_SIZE || io)
                panic("invalid register bar");

        state.base = vm_map_phys(SELF, (void *)bar, state.size);
        if (state.base == MAP_FAILED)
                panic("unable to map in registers");

        /* Reset the device to a known initial state. */
        if (!xlbc_reset())
                panic("unable to reset hardware");

        /* Now that the device is reset, enable bus mastering if needed. */
        cr = pci_attr_r8(devind, PCI_CR);
        if (!(cr & PCI_CR_MAST_EN))
                pci_attr_w8(devind, PCI_CR, cr | PCI_CR_MAST_EN);

        /* Obtain and apply the hardware address. */
        xlbc_get_hwaddr(addr);

        xlbc_set_hwaddr(addr);

        /* Perform various one-time initialization actions. */
        xlbc_init_once();

        /* Allocate memory for DMA. */
        xlbc_alloc_dma();

        /* Initialize the transmitter. */
        xlbc_reset_tx();

        /* Initialize the receiver. */
        state.filter = XLBC_FILTER_STATION;

        xlbc_reset_rx();

        /* Enable interrupts. */
        irq = pci_attr_r8(devind, PCI_ILR);
        state.hook_id = 0;

        if ((r = sys_irqsetpolicy(irq, 0, &state.hook_id)) != OK)
                panic("unable to register IRQ: %d", r);

        if ((r = sys_irqenable(&state.hook_id)) != OK)
                panic("unable to enable IRQ: %d", r);

        xlbc_issue_cmd(XLBC_CMD_INT_ENABLE | XLBC_STATUS_MASK);

        /*
         * We will probably get a link event anyway, but trigger one now in
         * case that does not happen.  The main purpose of this call is to
         * set the right duplex mode.
         */
        xlbc_link_event();
}

/*
 * Initialize the 3c90x driver and device.
 */
static int
xlbc_init(unsigned int instance, netdriver_addr_t * addr, uint32_t * caps,
    unsigned int * ticks)
{
        int devind;

        memset(&state, 0, sizeof(state));

        /* Try to find a recognized device. */
        if ((devind = xlbc_probe(instance)) < 0)
                return ENXIO;

        /* Initialize the device. */
        xlbc_init_hw(devind, addr);

        *caps = NDEV_CAP_MCAST | NDEV_CAP_BCAST;
        *ticks = sys_hz() / 10; /* update statistics 10x/sec */
        return OK;
}

/*
 * Stop the device.  The main purpose is to stop any ongoing and future DMA.
 */
static void
xlbc_stop(void)
{

        /* A full reset ought to do it. */
        (void)xlbc_reset();
}

/*
 * Set packet receipt mode.
 */
static void
xlbc_set_mode(unsigned int mode, const netdriver_addr_t * mcast_list __unused,
    unsigned int mcast_count __unused)
{

        state.filter = XLBC_FILTER_STATION;

        if (mode & (NDEV_MODE_MCAST_LIST | NDEV_MODE_MCAST_ALL))
                state.filter |= XLBC_FILTER_MULTI;
        if (mode & NDEV_MODE_BCAST)
                state.filter |= XLBC_FILTER_BROAD;
        if (mode & NDEV_MODE_PROMISC)
                state.filter |= XLBC_FILTER_PROMISC;

        xlbc_issue_cmd(XLBC_CMD_SET_FILTER | state.filter);
}

/*
 * Try to receive a packet.
 */
static ssize_t
xlbc_recv(struct netdriver_data * data, size_t max)
{
        uint32_t flags;
        uint8_t *ptr;
        unsigned int head;
        size_t len;

        head = state.upd_head;
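
        /*
         * Use a volatile read for the flags field, since the device updates
         * this field through DMA while we are inspecting it.
         */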
        flags = *(volatile uint32_t *)&state.upd_base[head].flags;

        /*
         * The documentation implies, but does not state, that UP_COMPLETE is
         * set whenever UP_ERROR is.  We rely exclusively on UP_COMPLETE.
         */
        if (!(flags & XLBC_UP_COMPLETE))
                return SUSPEND;

        if (flags & XLBC_UP_ERROR) {
                XLBC_DEBUG(("%s: received error\n", netdriver_name()));

                netdriver_stat_ierror(1);

                len = 0; /* immediately move on to the next descriptor */
        } else {
                len = flags & XLBC_UP_LEN;

                XLBC_DEBUG(("%s: received packet (size %zu)\n",
                    netdriver_name(), len));

                /* The device is supposed to not give us runt frames. */
                assert(len >= XLBC_MIN_PKT_LEN);

                /* Truncate large packets. */
                if (flags & XLBC_UP_OVERFLOW)
                        len = XLBC_MAX_PKT_LEN;
                if (len > max)
                        len = max;

                ptr = state.rxb_base + head * XLBC_MAX_PKT_LEN;

                netdriver_copyout(data, 0, ptr, len);
        }

        /* Mark the descriptor as ready for reuse. */
        *(volatile uint32_t *)&state.upd_base[head].flags = 0;

        /*
         * At this point, the receive engine may have stalled as a result of
         * filling up all descriptors.  Now that we have a free descriptor, we
         * can restart it.  As per the documentation, we unstall blindly.
         */
        xlbc_issue_cmd(XLBC_CMD_UP_UNSTALL);

        /* Advance to the next descriptor in our ring. */
        state.upd_head = (head + 1) % XLBC_UPD_COUNT;

        return len;
}

/*
 * Return how much padding (if any) must be prepended to a packet of the given
 * size so that it does not have to be split due to wrapping.  The given
 * offset is the starting point of the packet; this may be beyond the
 * transmission buffer size in the case that the current buffer contents
 * already wrap.
 */
static size_t
xlbc_pad_tx(size_t off, size_t size)
{

        if (off < XLBC_TXB_SIZE && off + size >= XLBC_TXB_SIZE)
                return XLBC_TXB_SIZE - off;
        else
                return 0;
}

/*
 * Try to send a packet.
 */
static int
xlbc_send(struct netdriver_data * data, size_t size)
{
        size_t used, off, left;
        unsigned int head, last;
        uint32_t phys;

        /* We need a free transmission descriptor. */
        if (state.dpd_used == XLBC_DPD_COUNT)
                return SUSPEND;

        /*
         * See if we can fit the packet in the circular transmission buffer.
         * The packet may not be broken up in two parts as the buffer wraps.
         */
        used = state.txb_used;
        used += xlbc_pad_tx(state.txb_tail + used, size);
        left = XLBC_TXB_SIZE - used;

        if (left < size)
                return SUSPEND;

        XLBC_DEBUG(("%s: transmitting packet (size %zu)\n",
            netdriver_name(), size));

        /* Copy in the packet. */
        off = (state.txb_tail + used) % XLBC_TXB_SIZE;

        netdriver_copyin(data, 0, &state.txb_base[off], size);

        /* Set up a descriptor for the packet. */
        head = (state.dpd_tail + state.dpd_used) % XLBC_DPD_COUNT;

        state.dpd_base[head].next = 0;
        state.dpd_base[head].flags = XLBC_DN_RNDUP_WORD | XLBC_DN_DN_INDICATE;
        state.dpd_base[head].addr = state.txb_phys + off;
        state.dpd_base[head].len = XLBC_LEN_LAST | size;

        phys = state.dpd_phys + head * sizeof(xlbc_pd_t);
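
        /*
         * Prevent the compiler from reordering the descriptor stores above
         * past the point where the descriptor is handed to the device below.
         */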
        __insn_barrier();

        /* We need to stall only if other packets were already pending. */
        if (XLBC_READ_32(XLBC_DN_LIST_PTR_REG) != 0) {
                assert(state.dpd_used > 0);

                xlbc_issue_cmd(XLBC_CMD_DN_STALL);
                if (!xlbc_wait_cmd())
                        panic("timeout trying to stall downloads");

                last = (state.dpd_tail + state.dpd_used - 1) % XLBC_DPD_COUNT;
                state.dpd_base[last].next = phys;
                /* Group interrupts a bit.  This is a tradeoff. */
                state.dpd_base[last].flags &= ~XLBC_DN_DN_INDICATE;

                if (XLBC_READ_32(XLBC_DN_LIST_PTR_REG) == 0)
                        XLBC_WRITE_32(XLBC_DN_LIST_PTR_REG, phys);

                xlbc_issue_cmd(XLBC_CMD_DN_UNSTALL);
        } else
                XLBC_WRITE_32(XLBC_DN_LIST_PTR_REG, phys);

        /* Advance internal queue heads. */
        state.dpd_used++;

        state.txb_used = used + size;
        assert(state.txb_used <= XLBC_TXB_SIZE);

        return OK;
}

/*
 * One or more packets have been downloaded.  Free up the corresponding
 * descriptors for later reuse.
 */
static void
xlbc_advance_tx(void)
{
        uint32_t flags, len;

        while (state.dpd_used > 0) {
                flags = *(volatile uint32_t *)
                    &state.dpd_base[state.dpd_tail].flags;

                if (!(flags & XLBC_DN_DN_COMPLETE))
                        break;

                XLBC_DEBUG(("%s: packet copied to transmitter\n",
                    netdriver_name()));

                len = state.dpd_base[state.dpd_tail].len & ~XLBC_LEN_LAST;

                state.dpd_tail = (state.dpd_tail + 1) % XLBC_DPD_COUNT;
                state.dpd_used--;
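
                /*
                 * Account for any padding that xlbc_send() put in front of
                 * the packet in order to avoid wrapping the buffer.
                 */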
                len += xlbc_pad_tx(state.txb_tail, len);
                assert(state.txb_used >= len);

                state.txb_tail = (state.txb_tail + len) % XLBC_TXB_SIZE;
                state.txb_used -= len;
        }
}

/*
 * A transmission error has occurred.  Restart, and if necessary even reset,
 * the transmitter.
 */
static void
xlbc_recover_tx(void)
{
        uint8_t status;
        int enable, reset;

        enable = reset = FALSE;

        while ((status = XLBC_READ_8(XLBC_TX_STATUS_REG)) &
            XLBC_TX_STATUS_COMPLETE) {
                XLBC_DEBUG(("%s: transmission error (0x%04x)\n",
                    netdriver_name(), status));

                /* This is an internal (non-packet) error status. */
                if (status & XLBC_TX_STATUS_OVERFLOW)
                        enable = TRUE;

                if (status & XLBC_TX_STATUS_MAX_COLL) {
                        netdriver_stat_coll(1);
                        enable = TRUE;
                }
                if (status &
                    (XLBC_TX_STATUS_UNDERRUN | XLBC_TX_STATUS_JABBER)) {
                        netdriver_stat_oerror(1);
                        reset = TRUE;
                }

                XLBC_WRITE_8(XLBC_TX_STATUS_REG, status);
        }

        if (reset) {
                /*
                 * Below is the documented Underrun Recovery procedure.  We
                 * use it for jabber errors as well, because there is no
                 * indication that another procedure should be followed for
                 * that case.
                 */
                xlbc_issue_cmd(XLBC_CMD_DN_STALL);
                if (!xlbc_wait_cmd())
                        panic("download stall timeout during recovery");

                SPIN_UNTIL(!(XLBC_READ_32(XLBC_DMA_CTRL_REG) &
                    XLBC_DMA_CTRL_DN_INPROG), XLBC_CMD_TIMEOUT);

                xlbc_select_window(XLBC_MEDIA_STS_WINDOW);

                SPIN_UNTIL(!(XLBC_READ_16(XLBC_MEDIA_STS_REG) &
                    XLBC_MEDIA_STS_TX_INPROG), XLBC_CMD_TIMEOUT);

                xlbc_issue_cmd(XLBC_CMD_TX_RESET);
                if (!xlbc_wait_cmd())
                        panic("transmitter reset timeout during recovery");

                xlbc_issue_cmd(XLBC_CMD_TX_ENABLE);

                XLBC_WRITE_32(XLBC_DN_LIST_PTR_REG,
                    state.dpd_phys + state.dpd_tail * sizeof(xlbc_pd_t));

                XLBC_DEBUG(("%s: performed recovery\n", netdriver_name()));
        } else if (enable)
                xlbc_issue_cmd(XLBC_CMD_TX_ENABLE);
}

/*
 * Update statistics.  We read all registers, not just the ones we are
 * interested in, so as to limit the number of useless statistics interrupts.
 */
static void
xlbc_update_stats(void)
{

        xlbc_select_window(XLBC_STATS_WINDOW);

        (void)XLBC_READ_8(XLBC_CARRIER_LOST_REG);
        (void)XLBC_READ_8(XLBC_SQE_ERR_REG);
        netdriver_stat_coll(XLBC_READ_8(XLBC_MULTI_COLL_REG));
        netdriver_stat_coll(XLBC_READ_8(XLBC_SINGLE_COLL_REG));
        netdriver_stat_coll(XLBC_READ_8(XLBC_LATE_COLL_REG));
        netdriver_stat_ierror(XLBC_READ_8(XLBC_RX_OVERRUNS_REG));
        (void)XLBC_READ_8(XLBC_FRAMES_DEFERRED_REG);

        (void)XLBC_READ_8(XLBC_UPPER_FRAMES_REG);
        (void)XLBC_READ_8(XLBC_FRAMES_XMIT_OK_REG);
        (void)XLBC_READ_8(XLBC_FRAMES_RCVD_OK_REG);

        (void)XLBC_READ_16(XLBC_BYTES_RCVD_OK_REG);
        (void)XLBC_READ_16(XLBC_BYTES_XMIT_OK_REG);

        xlbc_select_window(XLBC_SSD_STATS_WINDOW);

        (void)XLBC_READ_8(XLBC_BAD_SSD_REG);
}

/*
 * Process an interrupt.
 */
static void
xlbc_intr(unsigned int __unused mask)
{
        uint32_t val;
        int r;

        /*
         * Get the interrupt mask.  Acknowledge some interrupts, and disable
         * all interrupts as an automatic side effect.  The assumption is that
         * any new events are stored as indications which are then translated
         * into interrupts as soon as interrupts are reenabled, but this is
         * not documented explicitly.
         */
        val = XLBC_READ_16(XLBC_STATUS_AUTO_REG);

        XLBC_DEBUG(("%s: interrupt (0x%04x)\n", netdriver_name(), val));

        if (val & XLBC_STATUS_UP_COMPLETE)
                netdriver_recv();

        if (val & (XLBC_STATUS_DN_COMPLETE | XLBC_STATUS_TX_COMPLETE))
                xlbc_advance_tx();

        if (val & XLBC_STATUS_TX_COMPLETE)
                xlbc_recover_tx();

        if (val & XLBC_STATUS_HOST_ERROR) {
                /*
                 * A catastrophic host error has occurred.  Reset both the
                 * transmitter and the receiver.  This should be enough to
                 * clear the host error, but may be overkill in the cases
                 * where the error direction (TX or RX) can be clearly
                 * identified.  Since this entire condition is effectively
                 * untestable, we do not even try to be smart about it.
                 */
                XLBC_DEBUG(("%s: host error, performing reset\n",
                    netdriver_name()));

                xlbc_reset_tx();

                xlbc_reset_rx();

                /* If this has not resolved the problem, restart the driver. */
                if (XLBC_READ_16(XLBC_STATUS_REG) & XLBC_STATUS_HOST_ERROR)
                        panic("host error not cleared");
        }

        if (val & XLBC_STATUS_UPDATE_STATS)
                xlbc_update_stats();

        if (val & XLBC_STATUS_LINK_EVENT)
                xlbc_link_event();

        /* See if we should try to send more packets. */
        if (val & (XLBC_STATUS_DN_COMPLETE | XLBC_STATUS_TX_COMPLETE |
            XLBC_STATUS_HOST_ERROR))
                netdriver_send();

        /* Reenable interrupts. */
        if ((r = sys_irqenable(&state.hook_id)) != OK)
                panic("unable to reenable IRQ: %d", r);

        xlbc_issue_cmd(XLBC_CMD_INT_ENABLE | XLBC_STATUS_MASK);
}

/*
 * Do regular processing.
 */
static void
xlbc_tick(void)
{

        xlbc_update_stats();
}

/*
 * The 3c90x ethernet driver.
 */
int
main(int argc, char ** argv)
{

        env_setargs(argc, argv);

        netdriver_task(&xlbc_table);

        return EXIT_SUCCESS;
}