/*
 * $Id$
 */
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston,
    MA 02111-1307, USA.
*/
#include <exec/types.h>
#include <exec/resident.h>
#include <exec/io.h>
#include <exec/ports.h>

#include <aros/libcall.h>
#include <aros/macros.h>
#include <aros/io.h>

#include <hardware/intbits.h>

#include <oop/oop.h>

#include <devices/sana2.h>
#include <devices/sana2specialstats.h>

#include <utility/utility.h>
#include <utility/tagitem.h>
#include <utility/hooks.h>

#include <hidd/pci.h>

#include <proto/oop.h>
#include <proto/exec.h>
#include <proto/dos.h>
#include <proto/battclock.h>

#include <stdlib.h>

#include "e1000_osdep.h"
#include "e1000.h"
#include "e1000_defines.h"
#include "e1000_api.h"

#include "unit.h"
#include LC_LIBDEFS_FILE
/* A bit of fixed-up Linux code follows :) */

#undef LIBBASE
#define LIBBASE (unit->e1ku_device)
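
/*
 * Delay helpers: sleep for the requested number of micro/milliseconds by
 * queueing a TR_ADDREQUEST on the unit's timer.device request and waiting
 * for it to complete with DoIO().
 */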
void e1000_usec_delay(struct net_device *unit, ULONG usec)
{
    if (unit != NULL)
    {
        unit->e1ku_DelayPort.mp_SigTask = FindTask(NULL);
        unit->e1ku_DelayReq.tr_node.io_Command = TR_ADDREQUEST;
        unit->e1ku_DelayReq.tr_time.tv_micro = usec % 1000000;
        unit->e1ku_DelayReq.tr_time.tv_secs = usec / 1000000;

        DoIO((struct IORequest *)&unit->e1ku_DelayReq);
    }
}

void e1000_msec_delay(struct net_device *unit, ULONG msec)
{
    e1000_usec_delay(unit, 1000 * msec);
}

void e1000_msec_delay_irq(struct net_device *unit, ULONG msec)
{
//    e1000_usec_delay(unit, 1000 * msec);
}
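
/*
 * MMIO write accessors: each write is followed by a read-back of the same
 * location so that the (possibly posted) write has reached the device
 * before we continue.
 */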
void MMIO_W8(APTR addr, UBYTE val8)
{
    *((volatile UBYTE *)(addr)) = (val8);

    MMIO_R8(addr);
}

void MMIO_W16(APTR addr, UWORD val16)
{
    *((volatile UWORD *)(addr)) = (val16);

    MMIO_R16(addr);
}

void MMIO_W32(APTR addr, ULONG val32)
{
    *((volatile ULONG *)(addr)) = (val32);

    MMIO_R32(addr);
}

static BOOL e1000func_check_64k_bound(struct net_device *unit,
                                      void *start, unsigned long len)
{
    unsigned long begin = (unsigned long) start;
    unsigned long end = begin + len;

    /* First rev 82545 and 82546 need to not allow any memory
     * write location to cross 64k boundary due to errata 23 */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82545 ||
        ((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82546) {
        return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
    }

    return TRUE;
}
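
/* Mask (IMC) or unmask (IMS) all interrupt causes, then flush the write. */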
void e1000func_irq_disable(struct net_device *unit)
{
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_IMC, ~0);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);
}

void e1000func_irq_enable(struct net_device *unit)
{
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_IMS, IMS_ENABLE_MASK);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);
}
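
/*
 * 82542 rev 2.0 must be held in receiver reset (RCTL.RST), with MWI disabled,
 * while the receive address registers are rewritten; these enter/leave pairs
 * are used around such updates (see e1000func_set_mac and e1000func_set_multi
 * below).
 */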
static void e1000func_enter_82542_rst(struct net_device *unit)
{
    ULONG rctl;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type != e1000_82542)
        return;
    if (((struct e1000_hw *)unit->e1ku_Private00)->revision_id != E1000_REVISION_2)
        return;

    e1000_pci_clear_mwi((struct e1000_hw *)unit->e1ku_Private00);

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    rctl |= E1000_RCTL_RST;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 5);

//    if (netif_running(netdev))
//        e1000_clean_all_rx_rings(adapter);
}

static void e1000func_leave_82542_rst(struct net_device *unit)
{
    ULONG rctl;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type != e1000_82542)
        return;
    if (((struct e1000_hw *)unit->e1ku_Private00)->revision_id != E1000_REVISION_2)
        return;

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    rctl &= ~E1000_RCTL_RST;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 5);

    if (((struct e1000_hw *)unit->e1ku_Private00)->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
        e1000_pci_set_mwi((struct e1000_hw *)unit->e1ku_Private00);

//    if (netif_running(netdev)) {
//        /* No need to loop, because 82542 supports only 1 queue */
//        struct e1000_rx_ring *ring = &adapter->rx_ring[0];
//        e1000_configure_rx(adapter);
//        adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
//    }
}
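
/*
 * Program the transmit side: descriptor ring base/length and head/tail
 * registers for every Tx queue, the inter-packet gap and interrupt delay
 * timers, and finally the Transmit Control Register.
 */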
static void e1000func_configure_tx(struct net_device *unit)
{
    ULONG tdlen, tctl, tipg;
    ULONG ipgr1, ipgr2;
    UQUAD tdba;
    int i;

    D(bug("[%s]: %s(unit @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));

    /* Setup the HW Tx Head and Tail descriptor pointers */
    for (i = 0; i < unit->e1ku_txRing_QueueSize; i++)
    {
        D(bug("[%s] %s: Tx Queue %d @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, &unit->e1ku_txRing[i]));
        D(bug("[%s] %s: Tx Queue count = %d)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_txRing[i].count));

        tdba = (IPTR)unit->e1ku_txRing[i].dma;
        tdlen = (ULONG)(unit->e1ku_txRing[i].count * sizeof(struct e1000_tx_desc));
        D(bug("[%s] %s: Tx Queue Ring Descriptor DMA @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_txRing[i].dma, tdlen));

        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDBAL(i), (ULONG)(tdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDBAH(i), (ULONG)(tdba >> 32));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDLEN(i), tdlen);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDH(i), 0);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDT(i), 0);
        unit->e1ku_txRing[i].tdh = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_TDH(i));
        unit->e1ku_txRing[i].tdt = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_TDT(i));
        D(bug("[%s] %s: Tx Queue TDH=%d, TDT=%d\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_txRing[i].tdh, unit->e1ku_txRing[i].tdt));
    }

    /* Set the default values for the Tx Inter Packet Gap timer */
    if ((((struct e1000_hw *)unit->e1ku_Private00)->mac.type <= e1000_82547_rev_2) &&
        ((((struct e1000_hw *)unit->e1ku_Private00)->phy.media_type == e1000_media_type_fiber) ||
         (((struct e1000_hw *)unit->e1ku_Private00)->phy.media_type == e1000_media_type_internal_serdes)))
        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
    else
        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

    switch (((struct e1000_hw *)unit->e1ku_Private00)->mac.type)
    {
    case e1000_82542:
        tipg = DEFAULT_82542_TIPG_IPGT;
        ipgr1 = DEFAULT_82542_TIPG_IPGR1;
        ipgr2 = DEFAULT_82542_TIPG_IPGR2;
        break;
    default:
        ipgr1 = DEFAULT_82543_TIPG_IPGR1;
        ipgr2 = DEFAULT_82543_TIPG_IPGR2;
        break;
    }
    tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
    tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TIPG, tipg);

    /* Set the Tx Interrupt Delay register */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TIDV, 0);
//    if (unit->flags & E1000_FLAG_HAS_INTR_MODERATION)
//        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TADV, unit->tx_abs_int_delay);

    /* Program the Transmit Control Register */
    tctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TCTL);
    tctl &= ~E1000_TCTL_CT;
    tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
            (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

    e1000_config_collision_dist((struct e1000_hw *)unit->e1ku_Private00);

    /* Setup Transmit Descriptor Settings for eop descriptor */
    unit->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

    /* only set IDE if we are delaying interrupts using the timers */
//    if (unit->tx_int_delay)
//        unit->txd_cmd |= E1000_TXD_CMD_IDE;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type < e1000_82543)
        unit->txd_cmd |= E1000_TXD_CMD_RPS;
    else
        unit->txd_cmd |= E1000_TXD_CMD_RS;

    /* Cache if we're 82544 running in PCI-X because we'll
     * need this to apply a workaround later in the send path. */
//    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82544 &&
//        ((struct e1000_hw *)unit->e1ku_Private00)->bus.type == e1000_bus_type_pcix)
//        adapter->pcix_82544 = 1;

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TCTL, tctl);
}
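
/*
 * Build the Receive Control Register value: broadcast accept, no loopback,
 * half-FIFO descriptor minimum threshold, long packet enable depending on
 * the MTU, and the receive buffer size.
 */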
static void e1000func_setup_rctl(struct net_device *unit)
{
    ULONG rctl;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);

    rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

    rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
            (((struct e1000_hw *)unit->e1ku_Private00)->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

    /* disable the stripping of CRC because it breaks
     * BMC firmware connected over SMBUS
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type > e1000_82543)
        rctl |= E1000_RCTL_SECRC;
    */

    if (e1000_tbi_sbp_enabled_82543((struct e1000_hw *)unit->e1ku_Private00))
        rctl |= E1000_RCTL_SBP;
    else
        rctl &= ~E1000_RCTL_SBP;

    if (unit->e1ku_mtu <= ETH_DATA_LEN)
        rctl &= ~E1000_RCTL_LPE;
    else
        rctl |= E1000_RCTL_LPE;

    /* Setup buffer sizes */
    rctl &= ~E1000_RCTL_SZ_4096;
    rctl |= E1000_RCTL_BSEX;
    switch (unit->rx_buffer_len)
    {
    case E1000_RXBUFFER_256:
        rctl |= E1000_RCTL_SZ_256;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_512:
        rctl |= E1000_RCTL_SZ_512;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_1024:
        rctl |= E1000_RCTL_SZ_1024;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_2048:
    default:
        rctl |= E1000_RCTL_SZ_2048;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_4096:
        rctl |= E1000_RCTL_SZ_4096;
        break;
    case E1000_RXBUFFER_8192:
        rctl |= E1000_RCTL_SZ_8192;
        break;
    case E1000_RXBUFFER_16384:
        rctl |= E1000_RCTL_SZ_16384;
        break;
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
}
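
/*
 * Program the receive side: disable the receiver, set the receive delay
 * timer, write the descriptor ring base/length and head/tail registers for
 * every Rx queue, set up the RXCSUM register, then re-enable the receiver.
 */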
static void e1000func_configure_rx(struct net_device *unit)
{
    ULONG rdlen, rctl, rxcsum;
    UQUAD rdba;
    int i;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* disable receivers while setting up the descriptors */
    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl & ~E1000_RCTL_EN);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 10);

    /* set the Receive Delay Timer Register */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDTR, 0);

//    if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) {
//        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RADV, adapter->rx_abs_int_delay);
//        if (adapter->itr_setting != 0)
//            E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_ITR,
//                1000000000 / (adapter->itr * 256));
//    }

    /* Setup the HW Rx Head and Tail Descriptor Pointers and
     * the Base and Length of the Rx Descriptor Ring */
    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        D(bug("[%s] %s: Rx Queue %d @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, &unit->e1ku_rxRing[i]));
        D(bug("[%s] %s: Rx Queue count = %d)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_rxRing[i].count));

        rdlen = (ULONG)(unit->e1ku_rxRing[i].count * sizeof(struct e1000_rx_desc));
        rdba = (IPTR)unit->e1ku_rxRing[i].dma;
        D(bug("[%s] %s: Rx Queue Ring Descriptor DMA @ %p, [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_rxRing[i].dma, rdlen));

        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDBAL(i), (ULONG)(rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDBAH(i), (ULONG)(rdba >> 32));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDLEN(i), rdlen);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDH(i), 0);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDT(i), 0);
        unit->e1ku_rxRing[i].rdh = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_RDH(i));
        unit->e1ku_rxRing[i].rdt = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_RDT(i));
        D(bug("[%s] %s: Rx Queue RDH=%d, RDT=%d\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_rxRing[i].rdh, unit->e1ku_rxRing[i].rdt));
    }

    D(bug("[%s] %s: Configuring checksum Offload..\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type >= e1000_82543)
    {
        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        rxcsum = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RXCSUM);
//        if (unit->rx_csum == TRUE) {
//            rxcsum |= E1000_RXCSUM_TUOFL;
//        } else {
            rxcsum &= ~E1000_RXCSUM_TUOFL;
            /* don't need to clear IPPCSE as it defaults to 0 */
//        }
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RXCSUM, rxcsum);
    }

    /* Enable Receivers */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
}
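
/*
 * Reset the adapter: repartition the packet buffer allocation (PBA) for the
 * current MAC type and maximum frame size, derive the flow control
 * watermarks, reset and re-initialise the hardware, and refresh PHY
 * information.
 */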
void e1000func_reset(struct net_device *unit)
{
    struct e1000_mac_info *mac = &((struct e1000_hw *)unit->e1ku_Private00)->mac;
    struct e1000_fc_info *fc = &((struct e1000_hw *)unit->e1ku_Private00)->fc;
    u32 pba = 0, tx_space, min_tx_space, min_rx_space;
    bool legacy_pba_adjust = FALSE;
    u16 hwm;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* Repartition Pba for greater than 9k mtu
     * To take effect CTRL.RST is required. */
    switch (mac->type)
    {
    case e1000_82542:
    case e1000_82543:
    case e1000_82544:
    case e1000_82540:
    case e1000_82541:
    case e1000_82541_rev_2:
        legacy_pba_adjust = TRUE;
        pba = E1000_PBA_48K;
        break;
    case e1000_82545:
    case e1000_82545_rev_3:
    case e1000_82546:
    case e1000_82546_rev_3:
        pba = E1000_PBA_48K;
        break;
    case e1000_82547:
    case e1000_82547_rev_2:
        legacy_pba_adjust = TRUE;
        pba = E1000_PBA_30K;
        break;
    case e1000_undefined:
    case e1000_num_macs:
        break;
    }

    if (legacy_pba_adjust == TRUE) {
        if (unit->e1ku_frame_max > E1000_RXBUFFER_8192)
            pba -= 8; /* allocate more FIFO for Tx */

        if (mac->type == e1000_82547) {
            unit->e1ku_tx_fifo_head = 0;
            unit->e1ku_tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
            unit->e1ku_tx_fifo_size = (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
//            atomic_set(&unit->tx_fifo_stall, 0);
        }
    } else if (unit->e1ku_frame_max > ETH_MAXPACKETSIZE) {
        /* adjust PBA for jumbo frames */
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA, pba);

        /* To maintain wire speed transmits, the Tx FIFO should be
         * large enough to accommodate two full transmit packets,
         * rounded up to the next 1KB and expressed in KB.  Likewise,
         * the Rx FIFO should be large enough to accommodate at least
         * one full receive packet and is similarly rounded up and
         * expressed in KB. */
        pba = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA);
        /* upper 16 bits has Tx packet buffer allocation size in KB */
        tx_space = pba >> 16;
        /* lower 16 bits has Rx packet buffer allocation size in KB */
        pba &= 0xffff;
        /* the tx fifo also stores 16 bytes of information about the tx
         * but don't include ethernet FCS because hardware appends it */
        min_tx_space = (unit->e1ku_frame_max + sizeof(struct e1000_tx_desc) - ETH_CRCSIZE) * 2;
        min_tx_space = ALIGN(min_tx_space, 1024);
        min_tx_space >>= 10;
        /* software strips receive CRC, so leave room for it */
        min_rx_space = unit->e1ku_frame_max;
        min_rx_space = ALIGN(min_rx_space, 1024);
        min_rx_space >>= 10;

        /* If current Tx allocation is less than the min Tx FIFO size,
         * and the min Tx FIFO size is less than the current Rx FIFO
         * allocation, take space away from current Rx allocation */
        if ((tx_space < min_tx_space) &&
            ((min_tx_space - tx_space) < pba))
        {
            pba = pba - (min_tx_space - tx_space);

            /* PCI/PCIx hardware has PBA alignment constraints */
            switch (mac->type)
            {
            case e1000_82545 ... e1000_82546_rev_3:
                pba &= ~(E1000_PBA_8K - 1);
                break;
            default:
                break;
            }

            /* if short on rx space, rx wins and must trump tx
             * adjustment or use Early Receive if available */
            if (pba < min_rx_space)
            {
                pba = min_rx_space;
            }
        }
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA, pba);

    /* flow control settings */
    /* The high water mark must be low enough to fit one full frame
     * (or the size used for early receive) above it in the Rx FIFO.
     * Set it to the lower of:
     * - 90% of the Rx FIFO size, and
     * - the full Rx FIFO size minus the early receive size (for parts
     *   with ERT support assuming ERT set to E1000_ERT_2048), or
     * - the full Rx FIFO size minus one full frame */
    hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - unit->e1ku_frame_max));

    fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
    fc->low_water = fc->high_water - 8;

    fc->pause_time = E1000_FC_PAUSE_TIME;
    fc->send_xon = 1;
    fc->current_mode = fc->requested_mode;

    /* Allow time for pending master requests to run */
    e1000_reset_hw((struct e1000_hw *)unit->e1ku_Private00);

    if (mac->type >= e1000_82544)
    {
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_WUC, 0);
    }

    if (e1000_init_hw((struct e1000_hw *)unit->e1ku_Private00))
    {
        D(bug("[%s] %s: Hardware Error\n", unit->e1ku_name, __PRETTY_FUNCTION__));
    }
    /* if (unit->hwflags & HWFLAGS_PHY_PWR_BIT) { */
    if ((mac->type >= e1000_82544) &&
        (mac->type <= e1000_82547_rev_2) &&
        (mac->autoneg == 1) &&
        (((struct e1000_hw *)unit->e1ku_Private00)->phy.autoneg_advertised == ADVERTISE_1000_FULL))
    {
        u32 ctrl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_CTRL);
        /* clear phy power management bit if we are in gig only mode,
         * which if enabled will attempt negotiation to 100Mb, which
         * can cause a loss of link at power off or driver unload */
        ctrl &= ~E1000_CTRL_SWDPIN3;
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_CTRL, ctrl);
    }

    /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

    e1000_reset_adaptive((struct e1000_hw *)unit->e1ku_Private00);
    e1000_get_phy_info((struct e1000_hw *)unit->e1ku_Private00);
}
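
/*
 * Program the primary receive address register (RAR0) from the unit's
 * current station address.
 */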
int e1000func_set_mac(struct net_device *unit)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* 82542 2.0 needs to be in reset to write receive address registers */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
    {
        e1000func_enter_82542_rst(unit);
    }

    memcpy(((struct e1000_hw *)unit->e1ku_Private00)->mac.addr, unit->e1ku_dev_addr, ETH_ADDRESSSIZE);

    e1000_rar_set((struct e1000_hw *)unit->e1ku_Private00, ((struct e1000_hw *)unit->e1ku_Private00)->mac.addr, 0);

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
    {
        e1000func_leave_82542_rst(unit);
    }

    return 0;
}
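
/*
 * Update the receive filters: the promiscuous/all-multicast bits in RCTL and
 * the multicast table array, rebuilt from the unit's list of multicast
 * address ranges.
 */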
void e1000func_set_multi(struct net_device *unit)
{
    struct AddressRange *range;
    UBYTE *mta_list;
    ULONG rctl, mc_count;
    int i = 0;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* Check for Promiscuous and All Multicast modes */

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);

    if (unit->e1ku_ifflags & IFF_PROMISC) {
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
    } else if (unit->e1ku_ifflags & IFF_ALLMULTI) {
        rctl |= E1000_RCTL_MPE;
        rctl &= ~E1000_RCTL_UPE;
    } else {
        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);

    /* 82542 2.0 needs to be in reset to write receive address registers */

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
        e1000func_enter_82542_rst(unit);

    ListLength(&unit->e1ku_multicast_ranges, mc_count);

    if (mc_count > 0)
    {
        mta_list = AllocMem(mc_count * ETH_ADDRESSSIZE, MEMF_PUBLIC | MEMF_CLEAR);
        if (!mta_list)
            return;

        /* The shared function expects a packed array of only addresses. */
        ForeachNode(&unit->e1ku_multicast_ranges, range) {
            memcpy(mta_list + (i * ETH_ADDRESSSIZE), &range->lower_bound, ETH_ADDRESSSIZE);
            i++;
        }

        e1000_update_mc_addr_list((struct e1000_hw *)unit->e1ku_Private00, mta_list, i);

        FreeMem(mta_list, mc_count * ETH_ADDRESSSIZE);
    }

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
        e1000func_leave_82542_rst(unit);
}

// static void e1000func_deinitialize(struct net_device *unit)
// {
// }
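
/*
 * Install the card's interrupt handler on its kernel IRQ and the timeout
 * handler on the vertical blank interrupt.
 */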
int request_irq(struct net_device *unit)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    AddIntServer(INTB_KERNEL | unit->e1ku_IRQ, &unit->e1ku_irqhandler);
    AddIntServer(INTB_VERTB, &unit->e1ku_touthandler);

    D(bug("[%s] %s: IRQ Handlers configured\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    return 0;
}

#if 0
static void free_irq(struct net_device *unit)
{
    RemIntServer(INTB_KERNEL | unit->e1ku_IRQ, &unit->e1ku_irqhandler);
    RemIntServer(INTB_VERTB, &unit->e1ku_touthandler);
}
#endif
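
/*
 * Allocate a transmit descriptor ring and its per-buffer bookkeeping array.
 * The descriptor memory is re-allocated once if it happens to cross a 64kB
 * boundary (errata 23 workaround, see e1000func_check_64k_bound above).
 */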
static int e1000func_setup_tx_resources(struct net_device *unit,
                                        struct e1000_tx_ring *tx_ring)
{
    ULONG size;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    size = sizeof(struct e1000_buffer) * tx_ring->count;

    D(bug("[%s] %s: Configuring for %d buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->count));

    if ((tx_ring->buffer_info = AllocMem(size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    {
        D(bug("[%s] %s: Unable to allocate memory for the transmit descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__));
        return -E1000_ERR_CONFIG;
    }

    D(bug("[%s] %s: Tx Buffer Info @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->buffer_info, size));

    /* round up to nearest 4K */
    tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
    tx_ring->size = ALIGN(tx_ring->size, 4096);

    if ((tx_ring->desc = AllocMem(tx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    {
setup_tx_desc_die:
        FreeMem(tx_ring->buffer_info, size);
        D(bug("[%s] %s: Unable to allocate memory for the transmit descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__));
        return -E1000_ERR_CONFIG;
    }
    tx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)tx_ring->desc);

    /* Fix for errata 23, can't cross 64kB boundary */
    if (!e1000func_check_64k_bound(unit, tx_ring->desc, tx_ring->size))
    {
        void *olddesc = tx_ring->desc;
        D(bug("[%s] %s: tx_ring align check failed: %u bytes at %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->size, tx_ring->desc));
        /* Try again, without freeing the previous */
        if ((tx_ring->desc = AllocMem(tx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        {
            /* Failed allocation, critical failure */
            FreeMem(olddesc, tx_ring->size);
            tx_ring->dma = NULL;
            goto setup_tx_desc_die;
        }
        tx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)tx_ring->desc);

        if (!e1000func_check_64k_bound(unit, tx_ring->desc, tx_ring->size))
        {
            /* give up */
            FreeMem(tx_ring->desc, tx_ring->size);
            FreeMem(olddesc, tx_ring->size);
            tx_ring->dma = NULL;
            D(bug("[%s] %s: Unable to allocate aligned memory for the transmit descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__));

            FreeMem(tx_ring->buffer_info, size);
            return -E1000_ERR_CONFIG;
        } else {
            /* Free old allocation, new allocation was successful */
            FreeMem(olddesc, tx_ring->size);
        }
    }

    D(bug("[%s] %s: Tx Ring Descriptors @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->desc, tx_ring->size));

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;

    return 0;
}

int e1000func_setup_all_tx_resources(struct net_device *unit)
{
    int i, err = 0;

    for (i = 0; i < unit->e1ku_txRing_QueueSize; i++)
    {
        err = e1000func_setup_tx_resources(unit, &unit->e1ku_txRing[i]);
        if (err)
        {
            D(bug("[%s] %s: Allocation for Tx Queue %u failed\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
            for (i-- ; i >= 0; i--)
            {
                e1000func_free_tx_resources(unit, &unit->e1ku_txRing[i]);
            }
            break;
        }
    }

    return err;
}
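
/*
 * Receive-side counterpart of the above: allocate the Rx descriptor ring and
 * its buffer bookkeeping array, with the same 64kB-boundary retry.
 */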
static int e1000func_setup_rx_resources(struct net_device *unit,
                                        struct e1000_rx_ring *rx_ring)
{
    int buffer_size;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    buffer_size = sizeof(struct e1000_rx_buffer) * rx_ring->count;

    D(bug("[%s] %s: Configuring for %d buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->count));

    if ((rx_ring->buffer_info = AllocMem(buffer_size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
        D(bug("[%s] %s: Unable to allocate memory for the receive ring buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__));
        return -E1000_ERR_CONFIG;
    }

    D(bug("[%s] %s: Rx Buffer Info @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->buffer_info, buffer_size));

    /* Round up to nearest 4K */
    rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
    rx_ring->size = ALIGN(rx_ring->size, 4096);

    if ((rx_ring->desc = AllocMem(rx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    {
        D(bug("[%s] %s: Unable to allocate memory for the receive ring descriptors\n", unit->e1ku_name, __PRETTY_FUNCTION__));
setup_rx_desc_die:
        FreeMem(rx_ring->buffer_info, buffer_size);
        return -E1000_ERR_CONFIG;
    }
    rx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)rx_ring->desc);

    /* Fix for errata 23, can't cross 64kB boundary */
    if (!e1000func_check_64k_bound(unit, rx_ring->desc, rx_ring->size))
    {
        void *olddesc = rx_ring->desc;
        D(bug("[%s] %s: rx_ring align check failed: %u bytes at %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->size, rx_ring->desc));

        /* Try again, without freeing the previous */
        if ((rx_ring->desc = AllocMem(rx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        {
            /* Failed allocation, critical failure */
            FreeMem(olddesc, rx_ring->size);
            rx_ring->dma = NULL;
            D(bug("[%s] %s: Unable to allocate memory for the receive descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__));
            goto setup_rx_desc_die;
        }
        rx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)rx_ring->desc);

        if (!e1000func_check_64k_bound(unit, rx_ring->desc, rx_ring->size)) {
            /* give up */
            FreeMem(rx_ring->desc, rx_ring->size);
            FreeMem(olddesc, rx_ring->size);
            rx_ring->dma = NULL;
            D(bug("[%s] %s: Unable to allocate aligned memory for the receive descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__));
            goto setup_rx_desc_die;
        } else {
            /* Free old allocation, new allocation was successful */
            FreeMem(olddesc, rx_ring->size);
        }
    }

    D(bug("[%s] %s: Rx Ring Descriptors @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->desc, rx_ring->size));

    /* set up ring defaults */
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    return 0;
}

int e1000func_setup_all_rx_resources(struct net_device *unit)
{
    int i, err = 0;

    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        err = e1000func_setup_rx_resources(unit, &unit->e1ku_rxRing[i]);
        if (err)
        {
            D(bug("[%s] %s: Allocation for Rx Queue %u failed\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
            for (i-- ; i >= 0; i--)
            {
                e1000func_free_rx_resources(unit, &unit->e1ku_rxRing[i]);
            }
            break;
        }
    }

    return err;
}

void e1000func_unmap_and_free_tx_resource(struct net_device *unit,
                                          struct e1000_buffer *buffer_info)
{
    if (buffer_info->dma) {
        buffer_info->dma = NULL;
    }
    if (buffer_info->buffer) {
        FreeMem(buffer_info->buffer, ETH_MAXPACKETSIZE);
        buffer_info->buffer = NULL;
    }
    /* buffer_info must be completely set up in the transmit path */
}

void e1000func_clean_tx_ring(struct net_device *unit,
                             struct e1000_tx_ring *tx_ring)
{
    struct e1000_buffer *buffer_info;
    unsigned long size;
    unsigned int i;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* Free all the Tx ring buffers */
    for (i = 0; i < tx_ring->count; i++) {
        buffer_info = &tx_ring->buffer_info[i];
        e1000func_unmap_and_free_tx_resource(unit, buffer_info);
    }

    size = sizeof(struct e1000_buffer) * tx_ring->count;
    memset(tx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(tx_ring->desc, 0, tx_ring->size);

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
//    tx_ring->last_tx_tso = 0;

    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdh), 0);
    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdt), 0);
}

void e1000func_free_tx_resources(struct net_device *unit,
                                 struct e1000_tx_ring *tx_ring)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    e1000func_clean_tx_ring(unit, tx_ring);

    FreeMem(tx_ring->buffer_info, sizeof(struct e1000_buffer) * tx_ring->count);
    tx_ring->buffer_info = NULL;

    FreeMem(tx_ring->desc, tx_ring->size);
    tx_ring->dma = tx_ring->desc = NULL;
}

void e1000func_clean_rx_ring(struct net_device *unit,
                             struct e1000_rx_ring *rx_ring)
{
    struct e1000_rx_buffer *buffer_info;
    unsigned long size;
    unsigned int i;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* Free all the Rx ring buffers */
    for (i = 0; i < rx_ring->count; i++) {
        buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];
        if (buffer_info->dma != NULL) {
            buffer_info->dma = NULL;
        }
        if (buffer_info->buffer)
        {
            FreeMem(buffer_info->buffer, unit->rx_buffer_len);
            buffer_info->buffer = NULL;
        }
    }

    size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
    memset(rx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdh), 0);
    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt), 0);
}

void e1000func_free_rx_resources(struct net_device *unit,
                                 struct e1000_rx_ring *rx_ring)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    e1000func_clean_rx_ring(unit, rx_ring);

    FreeMem(rx_ring->buffer_info, sizeof(struct e1000_rx_buffer) * rx_ring->count);
    rx_ring->buffer_info = NULL;

    FreeMem(rx_ring->desc, rx_ring->size);
    rx_ring->dma = rx_ring->desc = NULL;
}

#if 0
static int e1000func_close(struct net_device *unit)
{
    unit->e1ku_ifflags &= ~IFF_UP;

//    ObtainSemaphore(&np->lock);
//    np->in_shutdown = 1;
//    ReleaseSemaphore(&np->lock);

    unit->e1ku_toutNEED = FALSE;

//    netif_stop_queue(unit);
//    ObtainSemaphore(&np->lock);

//    e1000func_deinitialize(unit);   // Stop the chipset and set it in 16bit-mode

//    ReleaseSemaphore(&np->lock);

    free_irq(unit);

//    drain_ring(unit);

//    HIDD_PCIDriver_FreePCIMem(unit->e1ku_PCIDriver, np->rx_buffer);
//    HIDD_PCIDriver_FreePCIMem(unit->e1ku_PCIDriver, np->tx_buffer);

    ReportEvents(LIBBASE, unit, S2EVENT_OFFLINE);

    return 0;
}
#endif
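
/*
 * Refill the given Rx ring with freshly allocated, DMA-mapped receive
 * buffers and advance the ring's tail pointer (RDT) so the hardware can
 * use them.
 */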
void e1000func_alloc_rx_buffers(struct net_device *unit,
                                struct e1000_rx_ring *rx_ring,
                                int cleaned_count)
{
    struct e1000_rx_desc *rx_desc;
    struct e1000_rx_buffer *buffer_info;
    unsigned int i;

    i = rx_ring->next_to_use;

    while (cleaned_count--)
    {
        buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];

        if ((buffer_info->buffer = AllocMem(unit->rx_buffer_len, MEMF_PUBLIC|MEMF_CLEAR)) != NULL)
        {
            D(bug("[%s] %s: Buffer %d Allocated @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, buffer_info->buffer, unit->rx_buffer_len));
            if ((buffer_info->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)buffer_info->buffer)) == NULL)
            {
                D(bug("[%s] %s: Failed to Map Buffer %d for DMA!!\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
            }
            D(bug("[%s] %s: Buffer %d DMA @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, buffer_info->dma));
        }

        rx_desc = E1000_RX_DESC(rx_ring, i);
//        rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
        rx_desc->buffer_addr = (IPTR)buffer_info->dma;

        if (++i == rx_ring->count)
            i = 0;
    }

    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        if (i-- == 0)
            i = (rx_ring->count - 1);

        MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt), i);
    }
}
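
/*
 * Bring the adapter into an operational state: program the receive filters,
 * the Tx/Rx rings and RCTL, then pre-fill the receive rings with buffers.
 */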
void e1000func_configure(struct net_device *unit)
{
    int i;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    e1000func_set_multi(unit);

    e1000func_configure_tx(unit);
    e1000func_setup_rctl(unit);
    e1000func_configure_rx(unit);
    D(bug("[%s] %s: Tx/Rx Configured\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* call E1000_DESC_UNUSED which always leaves
     * at least 1 descriptor unused to make sure
     * next_to_use != next_to_clean */
    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        D(bug("[%s] %s: Allocating Rx Buffers for queue %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
        struct e1000_rx_ring *ring = &unit->e1ku_rxRing[i];
        e1000func_alloc_rx_buffers(unit, ring, E1000_DESC_UNUSED(ring));
    }
    D(bug("[%s] %s: Finished\n", unit->e1ku_name, __PRETTY_FUNCTION__));
}
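
/*
 * Reclaim completed transmit descriptors (those with the DD status bit set),
 * free their buffers, and report a hung transmit unit when requested.
 */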
BOOL e1000func_clean_tx_irq(struct net_device *unit,
                            struct e1000_tx_ring *tx_ring)
{
    struct e1000_tx_desc *tx_desc, *eop_desc;
    struct e1000_buffer *buffer_info;
    unsigned int i, eop;
    BOOL cleaned = FALSE;
    BOOL retval = FALSE;
    unsigned int total_tx_packets = 0;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    i = tx_ring->next_to_clean;
    eop = tx_ring->buffer_info[i].next_to_watch;
    eop_desc = E1000_TX_DESC(tx_ring, eop);

    D(bug("[%s] %s: starting at %d, eop=%d, desc @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, eop, eop_desc));

    while (eop_desc->upper.data & AROS_LONG2LE(E1000_TXD_STAT_DD)) {
        for (cleaned = FALSE; !cleaned; ) {
            D(bug("[%s] %s: cleaning Tx buffer %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
            tx_desc = E1000_TX_DESC(tx_ring, i);
            buffer_info = &tx_ring->buffer_info[i];
            cleaned = (i == eop);

            if (cleaned) {
                retval = TRUE;
                total_tx_packets++;
            }
            e1000func_unmap_and_free_tx_resource(unit, buffer_info);
            tx_desc->upper.data = 0;

            if (++i == tx_ring->count)
                i = 0;
        }

        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
//    if (cleaned && netif_carrier_ok(netdev) &&
//        E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) {
        /* Make sure that anybody stopping the queue after this
         * sees the new next_to_clean.
         */
//        smp_mb();

//        if (netif_queue_stopped(netdev) &&
//            !(test_bit(__E1000_DOWN, &adapter->state))) {
//            netif_wake_queue(netdev);
//            ++adapter->restart_queue;
//        }
//    }

    if (unit->detect_tx_hung) {
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i */
        unit->detect_tx_hung = FALSE;
        if (tx_ring->buffer_info[eop].dma && !(E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_STATUS) & E1000_STATUS_TXOFF)) {
            /* detected Tx unit hang */
            D(
                bug("[%s] %s: Detected Tx Unit Hang -:\n", unit->e1ku_name, __PRETTY_FUNCTION__);
                bug("[%s] %s: Tx Queue <%lu>\n", unit->e1ku_name, __PRETTY_FUNCTION__, (unsigned long)((tx_ring - unit->e1ku_txRing) / sizeof(struct e1000_tx_ring)));
                bug("[%s] %s: TDH <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, MMIO_R32(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdh));
                bug("[%s] %s: TDT <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, MMIO_R32(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdt));
                bug("[%s] %s: next_to_use <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->next_to_use);
                bug("[%s] %s: next_to_clean <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->next_to_clean);
                bug("[%s] %s: buffer_info[next_to_clean]\n", unit->e1ku_name, __PRETTY_FUNCTION__);
                bug("[%s] %s: next_to_watch <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, eop);
                bug("[%s] %s: next_to_watch.status <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, eop_desc->upper.fields.status);
            )
//            netif_stop_queue(netdev);
        }
    }

    unit->e1ku_stats.PacketsSent += total_tx_packets;
//    adapter->total_tx_packets += total_tx_packets;
    return retval;
}
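
/*
 * Process received frames: walk the descriptors marked done, apply the TBI
 * workaround where needed, filter on the destination address, and hand
 * accepted packets to the SANA-II openers (or to the orphan queue).
 */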
BOOL e1000func_clean_rx_irq(struct net_device *unit,
                            struct e1000_rx_ring *rx_ring)
{
    struct e1000_rx_desc *rx_desc, *next_rxd;
    D(struct e1000_rx_buffer *buffer_info, *next_buffer;)
    struct Opener *opener, *opener_tail;
    struct IOSana2Req *request, *request_tail;
    struct eth_frame *frame;

    unsigned int i, total_rx_bytes = 0, total_rx_packets = 0;
    int cleaned_count = 0;
    UBYTE status;
    ULONG length;
    BOOL accepted, is_orphan, cleaned = FALSE;

    i = rx_ring->next_to_clean;
    rx_desc = E1000_RX_DESC(rx_ring, i);
    D(buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];)

    D(bug("[%s] %s: Starting at %d, Rx Desc @ %p, Buffer Info @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, rx_desc, buffer_info));

    while (rx_desc->status & E1000_RXD_STAT_DD) {
        cleaned = TRUE;
        status = rx_desc->status;
        length = AROS_LE2WORD(rx_desc->length);

        if (++i == rx_ring->count) i = 0;
        next_rxd = E1000_RX_DESC(rx_ring, i);

        D(next_buffer = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];);

        cleaned_count++;

        /* !EOP means multiple descriptors were used to store a single
         * packet, also make sure the frame isn't just CRC only */
        if (!(status & E1000_RXD_STAT_EOP) || (length <= ETH_CRCSIZE)) {
            /* All receives must fit into a single buffer */
            D(bug("[%s] %s: Receive packet consumed multiple buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__));
            /* recycle */
            goto next_desc;
        }

        frame = (struct eth_frame *)(IPTR)rx_desc->buffer_addr;

        if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
            UBYTE last_byte = *(frame->eth_packet_data + length - 1);
            if (TBI_ACCEPT((struct e1000_hw *)unit->e1ku_Private00, status,
                           rx_desc->errors, length, last_byte,
                           unit->e1ku_frame_min,
                           unit->e1ku_frame_max))
            {
                e1000_tbi_adjust_stats_82543((struct e1000_hw *)unit->e1ku_Private00,
                                             unit->e1ku_hw_stats,
                                             length, frame->eth_packet_data,
                                             unit->e1ku_frame_max);

                length--;
            } else {
                /* recycle */
                goto next_desc;
            }
        }

        /* got a valid packet - forward it to the network core */
        is_orphan = TRUE;

        /* adjust length to remove Ethernet CRC, this must be
         * done after the TBI_ACCEPT workaround above */
        length -= ETH_CRCSIZE;

        /* probably a little skewed due to removing CRC */
        total_rx_bytes += length;
        total_rx_packets++;

        /* Receive Checksum Offload */
//        e1000func_rx_checksum(unit,
//                              (ULONG)(status) |
//                              ((ULONG)(rx_desc->errors) << 24),
//                              AROS_LE2WORD(rx_desc->csum), skb);
        frame->eth_packet_crc[0] = (AROS_LE2WORD(rx_desc->csum) & 0xff000000) >> 24;
        frame->eth_packet_crc[1] = (AROS_LE2WORD(rx_desc->csum) & 0xff0000) >> 16;
        frame->eth_packet_crc[2] = (AROS_LE2WORD(rx_desc->csum) & 0xff00) >> 8;
        frame->eth_packet_crc[3] = AROS_LE2WORD(rx_desc->csum) & 0xff;

        /* Dump contents of frame if DEBUG enabled */
        D({
            int j;
            bug("[%s]: Rx Buffer %d Packet Dump -:", unit->e1ku_name, i);
            for (j = 0; j < 64; j++) {
                if ((j % 16) == 0)
                {
                    bug("\n[%s]: %03x:", unit->e1ku_name, j);
                }
                bug(" %02x", ((unsigned char *)frame)[j]);
            }
            bug("\n");
        })

        /* Check for address validity */
        if (AddressFilter(LIBBASE, unit, frame->eth_packet_dest))
        {
            /* Packet is addressed to this driver */
            D(bug("[%s] %s: Packet IP accepted with type = %d, checksum = %x\n", unit->e1ku_name, __PRETTY_FUNCTION__, AROS_BE2WORD(frame->eth_packet_type), AROS_LE2WORD(rx_desc->csum)));

            opener = (APTR)unit->e1ku_Openers.mlh_Head;
            opener_tail = (APTR)&unit->e1ku_Openers.mlh_Tail;

            /* Offer packet to every opener */
            while (opener != opener_tail)
            {
                request = (APTR)opener->read_port.mp_MsgList.lh_Head;
                request_tail = (APTR)&opener->read_port.mp_MsgList.lh_Tail;
                accepted = FALSE;

                /* Offer packet to each request until it's accepted */
                while ((request != request_tail) && !accepted)
                {
                    if (request->ios2_PacketType == AROS_BE2WORD(frame->eth_packet_type))
                    {
                        D(bug("[%s] %s: copy packet for opener ..\n", unit->e1ku_name, __PRETTY_FUNCTION__));
                        CopyPacket(LIBBASE, unit, request, length, AROS_BE2WORD(frame->eth_packet_type), frame);
                        accepted = TRUE;
                    }
                    request = (struct IOSana2Req *)request->ios2_Req.io_Message.mn_Node.ln_Succ;
                }

                if (accepted)
                    is_orphan = FALSE;

                opener = (APTR)opener->node.mln_Succ;
            }

            /* If packet was unwanted, give it to S2_READORPHAN request */
            if (is_orphan)
            {
                unit->e1ku_stats.UnknownTypesReceived++;

                if (!IsMsgPortEmpty(unit->e1ku_request_ports[ADOPT_QUEUE]))
                {
                    CopyPacket(LIBBASE, unit,
                               (APTR)unit->e1ku_request_ports[ADOPT_QUEUE]->
                               mp_MsgList.lh_Head, length, AROS_BE2WORD(frame->eth_packet_type), frame);
                    D(bug("[%s] %s: packet copied to orphan queue\n", unit->e1ku_name, __PRETTY_FUNCTION__));
                }
            }
        }

next_desc:
        rx_desc->status = 0;

        /* use prefetched values */
        rx_desc = next_rxd;
        D(buffer_info = next_buffer);
    }
    rx_ring->next_to_clean = i;

    D(bug("[%s] %s: Next to clean = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->next_to_clean));

//    if ((cleaned_count = E1000_DESC_UNUSED(rx_ring)))
//        writel(i, ((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt);

    unit->e1ku_stats.PacketsReceived += total_rx_packets;
    //adapter->total_rx_packets += total_rx_packets;
    //adapter->total_rx_bytes += total_rx_bytes;
    D(bug("[%s] %s: Received %d packets (%d bytes)\n", unit->e1ku_name, __PRETTY_FUNCTION__, total_rx_packets, total_rx_bytes));

    return cleaned;
}

/** OS SUPPORT CALLS FOR INTEL CODE **/

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;

    D(bug("[%s]: %s()\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__));

    /* Check if the device's cache line size is set first? */
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pciwritemsg.reg = 0x04;
    pciwritemsg.val = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg) & ~0x0010;
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg);
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;

    D(bug("[%s]: %s()\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__));

    /* Check if the device's cache line size is set first? */
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pciwritemsg.reg = 0x04;
    pciwritemsg.val = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg) | 0x0010;
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg);
}

LONG e1000_read_pcie_cap_reg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_ReadConfigWord pcireadmsg;

    D(bug("[%s]: %s(reg:%d)\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, reg));

    if (((struct e1000Unit *)hw->back)->e1ku_PCIeCap)
    {
        pcireadmsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
        pcireadmsg.reg = ((struct e1000Unit *)hw->back)->e1ku_PCIeCap + reg;
        *value = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pcireadmsg);
        D(bug("[%s] %s: ------> [%04x]\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, *value));
        return (E1000_SUCCESS);
    }

    return 0;
}

void e1000_read_pci_cfg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_ReadConfigWord pcireadmsg;
    D(bug("[%s]: %s(reg:%d)\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, reg));

    pcireadmsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pcireadmsg.reg = reg;
    *value = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pcireadmsg);
    D(bug("[%s] %s: ------> [%04x]\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, *value));
}

void e1000_write_pci_cfg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;
    D(bug("[%s]: %s(reg:%d, %04x)\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, reg, *value));

    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    pciwritemsg.reg = reg;
    pciwritemsg.val = *value;
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg);
}