/*
 * $Id$
 */
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston,
    MA 02111-1307, USA.
*/
#include <exec/types.h>
#include <exec/resident.h>
#include <exec/io.h>
#include <exec/ports.h>

#include <aros/libcall.h>
#include <aros/macros.h>
#include <aros/io.h>

#include <hardware/intbits.h>

#include <oop/oop.h>

#include <devices/sana2.h>
#include <devices/sana2specialstats.h>

#include <utility/utility.h>
#include <utility/tagitem.h>
#include <utility/hooks.h>

#include <hidd/pci.h>

#include <proto/oop.h>
#include <proto/exec.h>
#include <proto/dos.h>
#include <proto/battclock.h>

#include <stdlib.h>

#include "e1000_osdep.h"
#include "e1000.h"
#include "e1000_defines.h"
#include "e1000_api.h"

#include "unit.h"
#include LC_LIBDEFS_FILE
/* A bit fixed linux stuff here :) */

#undef LIBBASE
#define LIBBASE (unit->e1ku_device)
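
/*
 * Delay helpers. These wait by submitting the unit's timer.device
 * request (e1ku_DelayReq) with DoIO(), so they may only be used from
 * Task context. The _irq variant is deliberately a no-op here, since
 * blocking on an IORequest is not possible from interrupt context.
 */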
void e1000_usec_delay(struct net_device *unit, ULONG usec)
{
    if (unit != NULL)
    {
        unit->e1ku_DelayPort.mp_SigTask = FindTask(NULL);
        unit->e1ku_DelayReq.tr_node.io_Command = TR_ADDREQUEST;
        unit->e1ku_DelayReq.tr_time.tv_micro = usec % 1000000;
        unit->e1ku_DelayReq.tr_time.tv_secs = usec / 1000000;

        DoIO((struct IORequest *)&unit->e1ku_DelayReq);
    }
}

void e1000_msec_delay(struct net_device *unit, ULONG msec)
{
    e1000_usec_delay(unit, 1000 * msec);
}

void e1000_msec_delay_irq(struct net_device *unit, ULONG msec)
{
    //e1000_usec_delay(unit, 1000 * msec);
}
static BOOL e1000func_check_64k_bound(struct net_device *unit,
                                      void *start, unsigned long len)
{
    unsigned long begin = (unsigned long) start;
    unsigned long end = begin + len;

    /* First rev 82545 and 82546 need to not allow any memory
     * write location to cross 64k boundary due to errata 23 */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82545 ||
        ((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82546) {
        return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
    }

    return TRUE;
}
void e1000func_irq_disable(struct net_device *unit)
{
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_IMC, ~0);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);
}

void e1000func_irq_enable(struct net_device *unit)
{
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_IMS, IMS_ENABLE_MASK);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);
}

static void e1000_clean_all_rx_rings(struct net_device *unit)
{
    D(bug("[%s]: %s(unit @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));
//    e1000func_clean_rx_ring(unit, unit->e1ku_rxRing);
}
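
/*
 * The 82542 rev 2.0 requires the receiver to be held in reset while the
 * receive address registers are written. The two helpers below enter
 * and leave that reset state; e1000func_set_mac() and
 * e1000func_set_multi() bracket their register updates with them.
 */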
static void e1000func_enter_82542_rst(struct net_device *unit)
{
    ULONG rctl;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type != e1000_82542)
        return;
    if (((struct e1000_hw *)unit->e1ku_Private00)->revision_id != E1000_REVISION_2)
        return;

    D(bug("[%s]: %s(unit @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));

    e1000_pci_clear_mwi((struct e1000_hw *)unit->e1ku_Private00);

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    rctl |= E1000_RCTL_RST;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 5);

//    if (netif_running(netdev))
        e1000_clean_all_rx_rings(unit);
}
static void e1000func_leave_82542_rst(struct net_device *unit)
{
    ULONG rctl;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type != e1000_82542)
        return;
    if (((struct e1000_hw *)unit->e1ku_Private00)->revision_id != E1000_REVISION_2)
        return;

    D(bug("[%s]: %s(unit @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    rctl &= ~E1000_RCTL_RST;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 5);

    if (((struct e1000_hw *)unit->e1ku_Private00)->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
        e1000_pci_set_mwi((struct e1000_hw *)unit->e1ku_Private00);

//    if (netif_running(netdev)) {
//        /* No need to loop, because 82542 supports only 1 queue */
//        struct e1000_rx_ring *ring = &adapter->rx_ring[0];
//        e1000_configure_rx(adapter);
//        adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
//    }
}
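
/*
 * e1000func_configure_tx - configure the transmit side of the MAC:
 * per-queue descriptor ring base/length and head/tail registers, the
 * inter-packet gap timers, the interrupt delay, the transmit control
 * register, and the per-descriptor command bits used in the send path.
 */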
static void e1000func_configure_tx(struct net_device *unit)
{
    ULONG tdlen, tctl, tipg;
    ULONG ipgr1, ipgr2;
    UQUAD tdba;
    int i;

    D(bug("[%s]: %s(unit @ %p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));

    /* Setup the HW Tx Head and Tail descriptor pointers */
    for (i = 0; i < unit->e1ku_txRing_QueueSize; i++)
    {
        D(bug("[%s] %s: Tx Queue %d @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, &unit->e1ku_txRing[i]));
        D(bug("[%s] %s: Tx Queue count = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_txRing[i].count));

        tdba = (IPTR)unit->e1ku_txRing[i].dma;
        tdlen = (ULONG)(unit->e1ku_txRing[i].count * sizeof(struct e1000_tx_desc));
        D(bug("[%s] %s: Tx Queue Ring Descriptor DMA @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_txRing[i].dma, tdlen));

        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDBAL(i), (ULONG)(tdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDBAH(i), (ULONG)(tdba >> 32));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDLEN(i), tdlen);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDH(i), 0);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDT(i), 0);
        unit->e1ku_txRing[i].tdh = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_TDH(i));
        unit->e1ku_txRing[i].tdt = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_TDT(i));
        D(bug("[%s] %s: Tx Queue TDH=%d, TDT=%d\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_txRing[i].tdh, unit->e1ku_txRing[i].tdt));
    }

    /* Set the default values for the Tx Inter Packet Gap timer */
    if ((((struct e1000_hw *)unit->e1ku_Private00)->mac.type <= e1000_82547_rev_2) &&
        ((((struct e1000_hw *)unit->e1ku_Private00)->phy.media_type == e1000_media_type_fiber) ||
         (((struct e1000_hw *)unit->e1ku_Private00)->phy.media_type == e1000_media_type_internal_serdes)))
        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
    else
        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

    switch (((struct e1000_hw *)unit->e1ku_Private00)->mac.type)
    {
    case e1000_82542:
        tipg = DEFAULT_82542_TIPG_IPGT;
        ipgr1 = DEFAULT_82542_TIPG_IPGR1;
        ipgr2 = DEFAULT_82542_TIPG_IPGR2;
        break;
    default:
        ipgr1 = DEFAULT_82543_TIPG_IPGR1;
        ipgr2 = DEFAULT_82543_TIPG_IPGR2;
        break;
    }
    tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
    tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TIPG, tipg);

    /* Set the Tx Interrupt Delay register */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TIDV, 0);
//    if (unit->flags & E1000_FLAG_HAS_INTR_MODERATION)
//        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TADV, unit->tx_abs_int_delay);

    /* Program the Transmit Control Register */
    tctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TCTL);
    tctl &= ~E1000_TCTL_CT;
    tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
            (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

    e1000_config_collision_dist((struct e1000_hw *)unit->e1ku_Private00);

    /* Setup Transmit Descriptor Settings for eop descriptor */
    unit->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

    /* only set IDE if we are delaying interrupts using the timers */
//    if (unit->tx_int_delay)
//        unit->txd_cmd |= E1000_TXD_CMD_IDE;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type < e1000_82543)
        unit->txd_cmd |= E1000_TXD_CMD_RPS;
    else
        unit->txd_cmd |= E1000_TXD_CMD_RS;

    /* Cache if we're 82544 running in PCI-X because we'll
     * need this to apply a workaround later in the send path. */
//    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82544 &&
//        ((struct e1000_hw *)unit->e1ku_Private00)->bus.type == e1000_bus_type_pcix)
//        adapter->pcix_82544 = 1;

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TCTL, tctl);
}
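
/*
 * e1000func_setup_rctl - build the receive control register value:
 * broadcast accept, no loopback, half-full descriptor threshold, the
 * multicast filter offset, long packet enable according to the MTU,
 * and the receive buffer size (BSEX selects the >2K sizes).
 */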
static void e1000func_setup_rctl(struct net_device *unit)
{
    ULONG rctl;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);

    rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

    rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
            (((struct e1000_hw *)unit->e1ku_Private00)->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

    /* disable the stripping of CRC because it breaks
     * BMC firmware connected over SMBUS
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type > e1000_82543)
        rctl |= E1000_RCTL_SECRC;
    */

    if (e1000_tbi_sbp_enabled_82543((struct e1000_hw *)unit->e1ku_Private00))
        rctl |= E1000_RCTL_SBP;
    else
        rctl &= ~E1000_RCTL_SBP;

    if (unit->e1ku_mtu <= ETH_DATA_LEN)
        rctl &= ~E1000_RCTL_LPE;
    else
        rctl |= E1000_RCTL_LPE;

    /* Setup buffer sizes */
    rctl &= ~E1000_RCTL_SZ_4096;
    rctl |= E1000_RCTL_BSEX;
    switch (unit->rx_buffer_len)
    {
    case E1000_RXBUFFER_256:
        rctl |= E1000_RCTL_SZ_256;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_512:
        rctl |= E1000_RCTL_SZ_512;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_1024:
        rctl |= E1000_RCTL_SZ_1024;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_2048:
    default:
        rctl |= E1000_RCTL_SZ_2048;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_4096:
        rctl |= E1000_RCTL_SZ_4096;
        break;
    case E1000_RXBUFFER_8192:
        rctl |= E1000_RCTL_SZ_8192;
        break;
    case E1000_RXBUFFER_16384:
        rctl |= E1000_RCTL_SZ_16384;
        break;
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
}
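
/*
 * e1000func_configure_rx - disable the receiver, program the receive
 * interrupt delay and the per-queue descriptor ring registers, set up
 * hardware checksum offload (82543 and later), then re-enable the
 * receiver.
 */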
static void e1000func_configure_rx(struct net_device *unit)
{
    ULONG rdlen, rctl, rxcsum;
    UQUAD rdba;
    int i;

    D(bug("[%s]: %s(0x%p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));

    /* disable receivers while setting up the descriptors */
    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl & ~E1000_RCTL_EN);
    E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 10);

    /* set the Receive Delay Timer Register */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDTR, 0);

//    if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) {
//        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RADV, adapter->rx_abs_int_delay);
//        if (adapter->itr_setting != 0)
//            E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_ITR,
//                1000000000 / (adapter->itr * 256));
//    }

    /* Setup the HW Rx Head and Tail Descriptor Pointers and
     * the Base and Length of the Rx Descriptor Ring */
    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        D(bug("[%s] %s: Rx Queue %d @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, &unit->e1ku_rxRing[i]));
        D(bug("[%s] %s: count = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_rxRing[i].count));

        rdlen = (ULONG)(unit->e1ku_rxRing[i].count * sizeof(struct e1000_rx_desc));
        rdba = (IPTR)unit->e1ku_rxRing[i].dma;
        D(bug("[%s] %s: Ring Descriptor DMA @ %p, [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_rxRing[i].dma, rdlen));

        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDBAL(i), (ULONG)(rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDBAH(i), (ULONG)(rdba >> 32));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDLEN(i), rdlen);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDH(i), 0);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDT(i), 0);
        unit->e1ku_rxRing[i].rdh = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_RDH(i));
        unit->e1ku_rxRing[i].rdt = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_RDT(i));
        D(bug("[%s] %s: RDH Reg %d, RDT Reg %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit->e1ku_rxRing[i].rdh, unit->e1ku_rxRing[i].rdt));
        D(bug("[%s] %s: RDH = %d, RDT = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, readl((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + unit->e1ku_rxRing[i].rdh)), readl((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + unit->e1ku_rxRing[i].rdt))));
    }

    D(bug("[%s] %s: Configuring checksum Offload..\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type >= e1000_82543)
    {
        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        rxcsum = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RXCSUM);
//        if (unit->rx_csum == TRUE) {
//            rxcsum |= E1000_RXCSUM_TUOFL;
//        } else {
            rxcsum &= ~E1000_RXCSUM_TUOFL;
            /* don't need to clear IPPCSE as it defaults to 0 */
//        }
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RXCSUM, rxcsum);
    }

    /* Enable Receivers */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
}
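
/*
 * e1000func_reset - repartition the packet buffer allocation (PBA)
 * between the Tx and Rx FIFOs for the configured frame size, derive
 * the flow control watermarks from it, then reset and re-initialise
 * the MAC through the shared Intel code.
 */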
void e1000func_reset(struct net_device *unit)
{
    struct e1000_mac_info *mac = &((struct e1000_hw *)unit->e1ku_Private00)->mac;
    struct e1000_fc_info *fc = &((struct e1000_hw *)unit->e1ku_Private00)->fc;
    u32 pba = 0, tx_space, min_tx_space, min_rx_space;
    bool legacy_pba_adjust = FALSE;
    u16 hwm;

    D(bug("[%s]: %s(0x%p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit);)

    /* Repartition Pba for greater than 9k mtu
     * To take effect CTRL.RST is required. */
    switch (mac->type)
    {
    case e1000_82542:
    case e1000_82543:
    case e1000_82544:
    case e1000_82540:
    case e1000_82541:
    case e1000_82541_rev_2:
        legacy_pba_adjust = TRUE;
        pba = E1000_PBA_48K;
        break;
    case e1000_82545:
    case e1000_82545_rev_3:
    case e1000_82546:
    case e1000_82546_rev_3:
        pba = E1000_PBA_48K;
        break;
    case e1000_82547:
    case e1000_82547_rev_2:
        legacy_pba_adjust = TRUE;
        pba = E1000_PBA_30K;
        break;
    case e1000_undefined:
    case e1000_num_macs:
        break;
    }

    if (legacy_pba_adjust == TRUE) {
        if (unit->e1ku_frame_max > E1000_RXBUFFER_8192)
            pba -= 8; /* allocate more FIFO for Tx */

        if (mac->type == e1000_82547) {
            unit->e1ku_tx_fifo_head = 0;
            unit->e1ku_tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
            unit->e1ku_tx_fifo_size = (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
//            atomic_set(&unit->tx_fifo_stall, 0);
        }
    } else if (unit->e1ku_frame_max > ETH_MAXPACKETSIZE) {
        /* adjust PBA for jumbo frames */
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA, pba);

        /* To maintain wire speed transmits, the Tx FIFO should be
         * large enough to accommodate two full transmit packets,
         * rounded up to the next 1KB and expressed in KB. Likewise,
         * the Rx FIFO should be large enough to accommodate at least
         * one full receive packet and is similarly rounded up and
         * expressed in KB. */
        pba = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA);
        /* upper 16 bits has Tx packet buffer allocation size in KB */
        tx_space = pba >> 16;
        /* lower 16 bits has Rx packet buffer allocation size in KB */
        pba &= 0xffff;
        /* the tx fifo also stores 16 bytes of information about the tx
         * but don't include ethernet FCS because hardware appends it */
        min_tx_space = (unit->e1ku_frame_max + sizeof(struct e1000_tx_desc) - ETH_CRCSIZE) * 2;
        min_tx_space = ALIGN(min_tx_space, 1024);
        min_tx_space >>= 10;
        /* software strips receive CRC, so leave room for it */
        min_rx_space = unit->e1ku_frame_max;
        min_rx_space = ALIGN(min_rx_space, 1024);
        min_rx_space >>= 10;

        /* If current Tx allocation is less than the min Tx FIFO size,
         * and the min Tx FIFO size is less than the current Rx FIFO
         * allocation, take space away from current Rx allocation */
        if ((tx_space < min_tx_space) &&
            ((min_tx_space - tx_space) < pba))
        {
            pba = pba - (min_tx_space - tx_space);

            /* PCI/PCIx hardware has PBA alignment constraints */
            switch (mac->type)
            {
            case e1000_82545 ... e1000_82546_rev_3:
                pba &= ~(E1000_PBA_8K - 1);
                break;
            default:
                break;
            }

            /* if short on rx space, rx wins and must trump tx
             * adjustment or use Early Receive if available */
            if (pba < min_rx_space)
            {
                pba = min_rx_space;
            }
        }
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA, pba);
    D(bug("[%s]: %s: pba = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, pba);)

    /* flow control settings */
    /* The high water mark must be low enough to fit one full frame
     * (or the size used for early receive) above it in the Rx FIFO.
     * Set it to the lower of:
     * - 90% of the Rx FIFO size, and
     * - the full Rx FIFO size minus the early receive size (for parts
     *   with ERT support assuming ERT set to E1000_ERT_2048), or
     * - the full Rx FIFO size minus one full frame */
    hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - unit->e1ku_frame_max));

    fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
    fc->low_water = fc->high_water - 8;

    fc->pause_time = E1000_FC_PAUSE_TIME;
    fc->send_xon = 1;
    fc->current_mode = fc->requested_mode;

    /* Allow time for pending master requests to run */
    e1000_reset_hw((struct e1000_hw *)unit->e1ku_Private00);

    if (mac->type >= e1000_82544)
    {
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_WUC, 0);
    }

    if (e1000_init_hw((struct e1000_hw *)unit->e1ku_Private00))
    {
        D(bug("[%s] %s: Hardware Error\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
    }

    /* if (unit->hwflags & HWFLAGS_PHY_PWR_BIT) { */
    if ((mac->type >= e1000_82544) &&
        (mac->type <= e1000_82547_rev_2) &&
        (mac->autoneg == 1) &&
        (((struct e1000_hw *)unit->e1ku_Private00)->phy.autoneg_advertised == ADVERTISE_1000_FULL))
    {
        u32 ctrl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_CTRL);
        /* clear phy power management bit if we are in gig only mode,
         * which if enabled will attempt negotiation to 100Mb, which
         * can cause a loss of link at power off or driver unload */
        ctrl &= ~E1000_CTRL_SWDPIN3;
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_CTRL, ctrl);
    }

    /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

    e1000_reset_adaptive((struct e1000_hw *)unit->e1ku_Private00);
    e1000_get_phy_info((struct e1000_hw *)unit->e1ku_Private00);
}
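
/* Write the unit's station address into the hardware receive address
 * register (RAR 0). */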
int e1000func_set_mac(struct net_device *unit)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

    /* 82542 2.0 needs to be in reset to write receive address registers */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
    {
        e1000func_enter_82542_rst(unit);
    }

    memcpy(((struct e1000_hw *)unit->e1ku_Private00)->mac.addr, unit->e1ku_dev_addr, ETH_ADDRESSSIZE);

    e1000_rar_set((struct e1000_hw *)unit->e1ku_Private00, ((struct e1000_hw *)unit->e1ku_Private00)->mac.addr, 0);

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
    {
        e1000func_leave_82542_rst(unit);
    }

    return 0;
}
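
/*
 * e1000func_set_multi - set the promiscuous/multicast mode bits in RCTL
 * and hand the shared code a packed address array built from the
 * driver's list of multicast ranges (only the lower bound of each
 * range is used).
 */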
void e1000func_set_multi(struct net_device *unit)
{
    struct AddressRange *range;
    UBYTE *mta_list;
    ULONG rctl, mc_count;
    int i = 0;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

    /* Check for Promiscuous and All Multicast modes */
    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);

    if (unit->e1ku_ifflags & IFF_PROMISC) {
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
    } else if (unit->e1ku_ifflags & IFF_ALLMULTI) {
        rctl |= E1000_RCTL_MPE;
        rctl &= ~E1000_RCTL_UPE;
    } else {
        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);

    /* 82542 2.0 needs to be in reset to write receive address registers */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
        e1000func_enter_82542_rst(unit);

    ListLength(&unit->e1ku_multicast_ranges, mc_count);

    if (mc_count > 0)
    {
        mta_list = AllocMem(mc_count * ETH_ADDRESSSIZE, MEMF_PUBLIC | MEMF_CLEAR);
        if (!mta_list)
            return;

        /* The shared function expects a packed array of only addresses. */
        ForeachNode(&unit->e1ku_multicast_ranges, range) {
            memcpy(mta_list + (i * ETH_ADDRESSSIZE), &range->lower_bound, ETH_ADDRESSSIZE);
            i++;
        }

        e1000_update_mc_addr_list((struct e1000_hw *)unit->e1ku_Private00, mta_list, i);

        FreeMem(mta_list, mc_count * ETH_ADDRESSSIZE);
    }

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
        e1000func_leave_82542_rst(unit);
}
// static void e1000func_deinitialize(struct net_device *unit)
// {
// }

int request_irq(struct net_device *unit)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

    AddIntServer(INTB_KERNEL | unit->e1ku_IRQ, &unit->e1ku_irqhandler);
    AddIntServer(INTB_VERTB, &unit->e1ku_touthandler);

    D(bug("[%s] %s: IRQ Handlers configured\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

    return 0;
}

#if 0
static void free_irq(struct net_device *unit)
{
    RemIntServer(INTB_KERNEL | unit->e1ku_IRQ, unit->e1ku_irqhandler);
    RemIntServer(INTB_VERTB, unit->e1ku_touthandler);
}
#endif
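
/*
 * Descriptor ring allocation. The rings are allocated from public
 * memory, mapped for PCI DMA via HIDD_PCIDriver_CPUtoPCI(), and
 * re-allocated once if the first attempt crosses a 64k boundary
 * (errata 23 workaround, see e1000func_check_64k_bound() above).
 */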
static int e1000func_setup_tx_resources(struct net_device *unit,
                                        struct e1000_tx_ring *tx_ring)
{
    ULONG size;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

    size = sizeof(struct e1000_buffer) * tx_ring->count;

    D(bug("[%s] %s: Configuring for %d buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->count));

    if ((tx_ring->buffer_info = AllocMem(size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    {
        D(bug("[%s] %s: Unable to allocate memory for the transmit descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
        return -E1000_ERR_CONFIG;
    }

    D(bug("[%s] %s: Tx Buffer Info @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->buffer_info, size);)

    /* round up to nearest 4K */
    tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
    tx_ring->size = ALIGN(tx_ring->size, 4096);

    if ((tx_ring->desc = AllocMem(tx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    {
setup_tx_desc_die:
        FreeMem(tx_ring->buffer_info, size);
        D(bug("[%s] %s: Unable to allocate memory for the transmit descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
        return -E1000_ERR_CONFIG;
    }
    tx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)tx_ring->desc);

    /* Fix for errata 23, can't cross 64kB boundary */
    if (!e1000func_check_64k_bound(unit, tx_ring->desc, tx_ring->size))
    {
        void *olddesc = tx_ring->desc;
        D(bug("[%s] %s: tx_ring align check failed: %u bytes at %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->size, tx_ring->desc);)

        /* Try again, without freeing the previous */
        if ((tx_ring->desc = AllocMem(tx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        {
            /* Failed allocation, critical failure */
            FreeMem(olddesc, tx_ring->size);
            tx_ring->dma = NULL;
            goto setup_tx_desc_die;
        }
        tx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)tx_ring->desc);

        if (!e1000func_check_64k_bound(unit, tx_ring->desc, tx_ring->size))
        {
            /* give up */
            FreeMem(tx_ring->desc, tx_ring->size);
            FreeMem(olddesc, tx_ring->size);
            tx_ring->dma = NULL;
            D(bug("[%s] %s: Unable to allocate aligned memory for the transmit descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

            FreeMem(tx_ring->buffer_info, size);
            return -E1000_ERR_CONFIG;
        } else {
            /* Free old allocation, new allocation was successful */
            FreeMem(olddesc, tx_ring->size);
        }
    }

    D(bug("[%s] %s: Tx Ring Descriptors @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->desc, tx_ring->size);)

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;

    return 0;
}
int e1000func_setup_all_tx_resources(struct net_device *unit)
{
    int i, err = 0;

    for (i = 0; i < unit->e1ku_txRing_QueueSize; i++)
    {
        err = e1000func_setup_tx_resources(unit, &unit->e1ku_txRing[i]);
        if (err)
        {
            D(bug("[%s] %s: Allocation for Tx Queue %u failed\n", unit->e1ku_name, __PRETTY_FUNCTION__, i);)
            for (i--; i >= 0; i--)
            {
                e1000func_free_tx_resources(unit, &unit->e1ku_txRing[i]);
            }
            break;
        }
    }

    return err;
}
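
/* Rx ring allocation mirrors the Tx case above, including the errata 23
 * re-allocation. */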
static int e1000func_setup_rx_resources(struct net_device *unit,
                                        struct e1000_rx_ring *rx_ring)
{
    int buffer_size;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    buffer_size = sizeof(struct e1000_rx_buffer) * rx_ring->count;

    D(bug("[%s] %s: Configuring for %d buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->count);)

    if ((rx_ring->buffer_info = AllocMem(buffer_size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
        D(bug("[%s] %s: Unable to allocate memory for the receive ring buffers\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
        return -E1000_ERR_CONFIG;
    }

    D(bug("[%s] %s: Rx Buffer Info @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->buffer_info, buffer_size);)

    /* Round up to nearest 4K */
    rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
    D(bug("[%s] %s: Wanted Size = %d bytes\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->size);)
    rx_ring->size = ALIGN(rx_ring->size, 4096);

    if ((rx_ring->desc = AllocMem(rx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
    {
        D(bug("[%s] %s: Unable to allocate memory for the receive ring descriptors\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
setup_rx_desc_die:
        FreeMem(rx_ring->buffer_info, buffer_size);
        return -E1000_ERR_CONFIG;
    }
    rx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)rx_ring->desc);

    /* Fix for errata 23, can't cross 64kB boundary */
    if (!e1000func_check_64k_bound(unit, rx_ring->desc, rx_ring->size))
    {
        void *olddesc = rx_ring->desc;
        D(bug("[%s] %s: rx_ring align check failed: %u bytes at %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->size, rx_ring->desc);)

        /* Try again, without freeing the previous */
        if ((rx_ring->desc = AllocMem(rx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        {
            /* Failed allocation, critical failure */
            FreeMem(olddesc, rx_ring->size);
            rx_ring->dma = NULL;
            D(bug("[%s] %s: Unable to allocate memory for the receive descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
            goto setup_rx_desc_die;
        }
        rx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)rx_ring->desc);

        if (!e1000func_check_64k_bound(unit, rx_ring->desc, rx_ring->size)) {
            /* give up */
            FreeMem(rx_ring->desc, rx_ring->size);
            FreeMem(olddesc, rx_ring->size);
            rx_ring->dma = NULL;
            D(bug("[%s] %s: Unable to allocate aligned memory for the receive descriptor ring\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
            goto setup_rx_desc_die;
        } else {
            /* Free old allocation, new allocation was successful */
            FreeMem(olddesc, rx_ring->size);
        }
    }

    D(bug("[%s] %s: Rx Ring Descriptors @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->desc, rx_ring->size);)

    /* set up ring defaults */
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    return 0;
}
int e1000func_setup_all_rx_resources(struct net_device *unit)
{
    int i, err = 0;

    D(bug("[%s] %s(0x%p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit);)

    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        err = e1000func_setup_rx_resources(unit, &unit->e1ku_rxRing[i]);
        if (err)
        {
            D(bug("[%s] %s: Allocation for Rx Queue %u failed\n", unit->e1ku_name, __PRETTY_FUNCTION__, i);)
            for (i--; i >= 0; i--)
            {
                e1000func_free_rx_resources(unit, &unit->e1ku_rxRing[i]);
            }
            break;
        }
    }

    return err;
}
void e1000func_unmap_and_free_tx_resource(struct net_device *unit,
                                          struct e1000_buffer *buffer_info)
{
    D(bug("[%s] %s(0x%p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit);)
    if (buffer_info->dma) {
        buffer_info->dma = NULL;
    }
    if (buffer_info->buffer) {
        FreeMem(buffer_info->buffer, ETH_MAXPACKETSIZE);
        buffer_info->buffer = NULL;
    }
    /* buffer_info must be completely set up in the transmit path */
}
void e1000func_clean_tx_ring(struct net_device *unit,
                             struct e1000_tx_ring *tx_ring)
{
    struct e1000_buffer *buffer_info;
    unsigned long size;
    unsigned int i;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* Free all the Tx ring buffers */
    for (i = 0; i < tx_ring->count; i++) {
        buffer_info = &tx_ring->buffer_info[i];
        e1000func_unmap_and_free_tx_resource(unit, buffer_info);
    }

    size = sizeof(struct e1000_buffer) * tx_ring->count;
    memset(tx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(tx_ring->desc, 0, tx_ring->size);

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
//    tx_ring->last_tx_tso = 0;

    writel(0, (APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdh));
    writel(0, (APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdt));
}
void e1000func_free_tx_resources(struct net_device *unit,
                                 struct e1000_tx_ring *tx_ring)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    e1000func_clean_tx_ring(unit, tx_ring);

    FreeMem(tx_ring->buffer_info, sizeof(struct e1000_buffer) * tx_ring->count);
    tx_ring->buffer_info = NULL;

    FreeMem(tx_ring->desc, tx_ring->size);
    tx_ring->dma = tx_ring->desc = NULL;
}
void e1000func_clean_rx_ring(struct net_device *unit,
                             struct e1000_rx_ring *rx_ring)
{
    struct e1000_rx_buffer *buffer_info;
    unsigned long size;
    unsigned int i;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* Free all the Rx ring buffers */
    for (i = 0; i < rx_ring->count; i++) {
        buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];
        if (buffer_info->dma != NULL) {
            buffer_info->dma = NULL;
        }
        if (buffer_info->buffer)
        {
            FreeMem(buffer_info->buffer, unit->rx_buffer_len);
            buffer_info->buffer = NULL;
        }
    }

    size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
    memset(rx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    writel(0, (APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdh));
    writel(0, (APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt));
}
void e1000func_free_rx_resources(struct net_device *unit,
                                 struct e1000_rx_ring *rx_ring)
{
    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    e1000func_clean_rx_ring(unit, rx_ring);

    FreeMem(rx_ring->buffer_info, sizeof(struct e1000_rx_buffer) * rx_ring->count);
    rx_ring->buffer_info = NULL;

    FreeMem(rx_ring->desc, rx_ring->size);
    rx_ring->dma = rx_ring->desc = NULL;
}
#if 0
static int e1000func_close(struct net_device *unit)
{
    D(bug("[%s] %s(0x%p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit);)

    unit->e1ku_ifflags &= ~IFF_UP;

//    ObtainSemaphore(&np->lock);
//    np->in_shutdown = 1;
//    ReleaseSemaphore(&np->lock);

    unit->e1ku_toutNEED = FALSE;

//    netif_stop_queue(unit);
//    ObtainSemaphore(&np->lock);

//    e1000func_deinitialize(unit); // Stop the chipset and set it in 16bit-mode

//    ReleaseSemaphore(&np->lock);

    free_irq(unit);

//    drain_ring(unit);

//    HIDD_PCIDriver_FreePCIMem(unit->e1ku_PCIDriver, np->rx_buffer);
//    HIDD_PCIDriver_FreePCIMem(unit->e1ku_PCIDriver, np->tx_buffer);

    ReportEvents(LIBBASE, unit, S2EVENT_OFFLINE);

    return 0;
}
#endif
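
/*
 * e1000func_alloc_rx_buffers - allocate and DMA-map a receive buffer
 * for each cleaned descriptor, then advance the ring tail (RDT) so the
 * hardware can use them again.
 */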
void e1000func_alloc_rx_buffers(struct net_device *unit,
                                struct e1000_rx_ring *rx_ring,
                                int cleaned_count)
{
    struct e1000_rx_desc *rx_desc;
    struct e1000_rx_buffer *buffer_info;
    unsigned int i;

    i = rx_ring->next_to_use;

    D(
        bug("[%s]: %s(0x%p, 0x%p, %d)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit, rx_ring, cleaned_count);
        bug("[%s]: %s: starting at %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, i);
    )

    while (cleaned_count--)
    {
        buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];

        if ((buffer_info->buffer = AllocMem(unit->rx_buffer_len, MEMF_PUBLIC|MEMF_CLEAR)) != NULL)
        {
            D(bug("[%s] %s: Buffer %d Allocated @ %p [%d bytes]\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, buffer_info->buffer, unit->rx_buffer_len);)
            if ((buffer_info->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)buffer_info->buffer)) == NULL)
            {
                D(bug("[%s] %s: Failed to Map Buffer %d for DMA!!\n", unit->e1ku_name, __PRETTY_FUNCTION__, i);)
            }
            D(bug("[%s] %s: Buffer %d DMA @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, buffer_info->dma);)
        }

        rx_desc = E1000_RX_DESC(rx_ring, i);
//        rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
        rx_desc->buffer_addr = (IPTR)buffer_info->dma;

        if (++i == rx_ring->count)
            i = 0;
    }

    D(bug("[%s]: %s: next_to_use = %d, i = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->next_to_use, i);)

    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        if (i-- == 0)
            i = (rx_ring->count - 1);

        D(bug("[%s]: %s: Adjusting RDT to %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, i);)
        writel(i, (APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt));
    }
}
void e1000func_configure(struct net_device *unit)
{
    int i;

    D(bug("[%s]: %s(0x%p)\n", unit->e1ku_name, __PRETTY_FUNCTION__, unit));

    e1000func_set_multi(unit);

    e1000func_configure_tx(unit);
    e1000func_setup_rctl(unit);
    e1000func_configure_rx(unit);
    D(bug("[%s] %s: Tx/Rx Configured\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    /* call E1000_DESC_UNUSED which always leaves
     * at least 1 descriptor unused to make sure
     * next_to_use != next_to_clean */
    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        struct e1000_rx_ring *ring = &unit->e1ku_rxRing[i];
        D(bug("[%s] %s: Allocating Rx Buffers for queue %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
        e1000func_alloc_rx_buffers(unit, ring, E1000_DESC_UNUSED(ring));
    }
    D(bug("[%s] %s: Finished\n", unit->e1ku_name, __PRETTY_FUNCTION__));
}
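
/*
 * e1000func_clean_tx_irq - walk completed transmit descriptors (DD bit
 * set), free their buffers, and dump state if a transmit hang is
 * suspected. Returns TRUE if at least one packet completed.
 */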
BOOL e1000func_clean_tx_irq(struct net_device *unit,
                            struct e1000_tx_ring *tx_ring)
{
    struct e1000_tx_desc *tx_desc, *eop_desc;
    struct e1000_buffer *buffer_info;
    unsigned int i, eop;
    BOOL cleaned = FALSE;
    BOOL retval = FALSE;
    unsigned int total_tx_packets = 0;

    D(bug("[%s]: %s()\n", unit->e1ku_name, __PRETTY_FUNCTION__));

    i = tx_ring->next_to_clean;
    eop = tx_ring->buffer_info[i].next_to_watch;
    eop_desc = E1000_TX_DESC(tx_ring, eop);

    D(bug("[%s] %s: starting at %d, eop=%d, desc @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, eop, eop_desc));

    while (eop_desc->upper.data & AROS_LONG2LE(E1000_TXD_STAT_DD)) {
        for (cleaned = FALSE; !cleaned; ) {
            D(bug("[%s] %s: cleaning Tx buffer %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, i));
            tx_desc = E1000_TX_DESC(tx_ring, i);
            buffer_info = &tx_ring->buffer_info[i];
            cleaned = (i == eop);

            if (cleaned) {
                retval = TRUE;
                total_tx_packets++;
            }
            e1000func_unmap_and_free_tx_resource(unit, buffer_info);
            tx_desc->upper.data = 0;

            if (++i == tx_ring->count)
                i = 0;
        }

        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
//    if (cleaned && netif_carrier_ok(netdev) &&
//        E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) {
        /* Make sure that anybody stopping the queue after this
         * sees the new next_to_clean.
         */
//        smp_mb();

//        if (netif_queue_stopped(netdev) &&
//            !(test_bit(__E1000_DOWN, &adapter->state))) {
//            netif_wake_queue(netdev);
//            ++adapter->restart_queue;
//        }
//    }

    if (unit->detect_tx_hung) {
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i */
        unit->detect_tx_hung = FALSE;
        if (tx_ring->buffer_info[eop].dma && !(E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_STATUS) & E1000_STATUS_TXOFF)) {
            /* detected Tx unit hang */
            D(
                bug("[%s] %s: Detected Tx Unit Hang -:\n", unit->e1ku_name, __PRETTY_FUNCTION__);
                bug("[%s] %s: Tx Queue <%lu>\n", unit->e1ku_name, __PRETTY_FUNCTION__, (unsigned long)(tx_ring - unit->e1ku_txRing));
                bug("[%s] %s: TDH <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, MMIO_R32(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdh));
                bug("[%s] %s: TDT <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, MMIO_R32(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdt));
                bug("[%s] %s: next_to_use <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->next_to_use);
                bug("[%s] %s: next_to_clean <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, tx_ring->next_to_clean);
                bug("[%s] %s: buffer_info[next_to_clean]\n", unit->e1ku_name, __PRETTY_FUNCTION__);
                bug("[%s] %s: next_to_watch <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, eop);
                bug("[%s] %s: next_to_watch.status <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, eop_desc->upper.fields.status);
            )
//            netif_stop_queue(netdev);
        }
    }
    unit->e1ku_stats.PacketsSent += total_tx_packets;
//    adapter->total_tx_packets += total_tx_packets;
    return retval;
}
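
/*
 * e1000func_rx_checksum - if the MAC has validated the TCP/UDP checksum
 * of a received frame, copy the hardware checksum into the frame and
 * skip software checksumming; otherwise flag that it still needs to be
 * calculated. (The #if (0) block is the original Linux implementation,
 * kept for reference.)
 */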
static void e1000func_rx_checksum(struct net_device *unit, u32 status_err,
                                  u32 csum, struct eth_frame *frame)
{
    BOOL doChecksum = TRUE;
#if (0)
    u16 status = (u16)status_err;
    u8 errors = (u8)(status_err >> 24);
    skb->ip_summed = CHECKSUM_NONE;

    /* 82543 or newer only */
    if (unlikely(adapter->hw.mac.type < e1000_82543)) return;
    /* Ignore Checksum bit is set */
    if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
    /* TCP/UDP checksum error bit is set */
    if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
        /* let the stack verify checksum errors */
        adapter->hw_csum_err++;
        return;
    }
    /* TCP/UDP Checksum has not been calculated */
    if (adapter->hw.mac.type <= e1000_82547_rev_2) {
        if (!(status & E1000_RXD_STAT_TCPCS))
            return;
    } else {
        if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
            return;
    }
    /* It must be a TCP or UDP packet with a valid checksum */
    if (likely(status & E1000_RXD_STAT_TCPCS)) {
        /* TCP checksum is good */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    }
    adapter->hw_csum_good++;
#else
    D(bug("[%s] %s: Frame (Pre)Checksum %x%x%x%x\n", unit->e1ku_name, __PRETTY_FUNCTION__, frame->eth_packet_crc[0], frame->eth_packet_crc[1], frame->eth_packet_crc[2], frame->eth_packet_crc[3]);)
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type >= e1000_82543)
    {
        if (status_err & E1000_RXD_STAT_IXSM)
            return;

        /* Make sure TCP/UDP checksum error bit is not set */
        if (!((status_err >> 24) & E1000_RXD_ERR_TCPE)) {
            BOOL valid = TRUE;
            /* Check if TCP/UDP Checksum has been calculated */
            if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type <= e1000_82547_rev_2) {
                if (!(status_err & E1000_RXD_STAT_TCPCS))
                    valid = FALSE;
            } else {
                if (!(status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
                    valid = FALSE;
            }
            /* It must be a TCP or UDP packet with a valid checksum */
            if (valid && (status_err & E1000_RXD_STAT_TCPCS)) {
                /* TCP checksum is good */
                D(bug("[%s] %s: Using offloaded Checksum\n", unit->e1ku_name, __PRETTY_FUNCTION__);)

                doChecksum = FALSE;
                frame->eth_packet_crc[0] = (csum & 0xff000000) >> 24;
                frame->eth_packet_crc[1] = (csum & 0xff0000) >> 16;
                frame->eth_packet_crc[2] = (csum & 0xff00) >> 8;
                frame->eth_packet_crc[3] = csum & 0xff;

#if (HAVE_CSUM_STATS)
                unit->e1ku_stats.hw_csum_good++;
#endif
            }
        }
        else
        {
            /* let the stack verify checksum errors */
            D(bug("[%s] %s: Checksum Error\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
#if (HAVE_CSUM_STATS)
            unit->e1ku_stats.hw_csum_err++;
#endif
        }
    }
#endif
    if (doChecksum)
    {
        // We need to calculate the frames checksum ...
        D(bug("[%s] %s: Frames checksum needs calculated...\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
    }

    D(bug("[%s] %s: Frame (Post)Checksum %x%x%x%x\n", unit->e1ku_name, __PRETTY_FUNCTION__, frame->eth_packet_crc[0], frame->eth_packet_crc[1], frame->eth_packet_crc[2], frame->eth_packet_crc[3]);)
}
UBYTE get_status(struct net_device *unit,
                 UBYTE *_status, struct e1000_rx_desc *rx_desc)
{
    *_status = rx_desc->status;
    D(bug("[%s] %s: Status: %08x\n", unit->e1ku_name, __PRETTY_FUNCTION__, *_status);)
    return *_status;
}
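
/*
 * e1000func_clean_rx_irq - service the receive ring: walk descriptors
 * with the DD status bit set, apply the TBI errata workaround, verify
 * checksums, and offer each accepted frame to every opener (or to the
 * orphan queue if nobody wants it). Returns TRUE if anything was
 * cleaned.
 */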
BOOL e1000func_clean_rx_irq(struct net_device *unit,
                            struct e1000_rx_ring *rx_ring)
{
    struct e1000_rx_desc *rx_desc, *next_rxd;
    D(struct e1000_rx_buffer *buffer_info, *next_buffer;)
    struct Opener *opener, *opener_tail;
    struct IOSana2Req *request, *request_tail;
    struct eth_frame *frame;

    unsigned int i, total_rx_bytes = 0, total_rx_packets = 0;
    int cleaned_count = 0;
    UBYTE status = 0;
    ULONG length;
    BOOL accepted, is_orphan, cleaned = FALSE, update = FALSE;

    i = rx_ring->next_to_clean;
    rx_desc = E1000_RX_DESC(rx_ring, i);
    D(buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];)

    D(bug("[%s] %s: Starting at %d, Rx Desc @ %p, Buffer Info @ %p\n", unit->e1ku_name, __PRETTY_FUNCTION__, i, rx_desc, buffer_info);)

    while (get_status(unit, &status, rx_desc) & E1000_RXD_STAT_DD) {
        int buffer_no = i;

#if (BROKEN_RX_QUEUE)
        // Queue stalls using this ....
        if (++i == rx_ring->count) i = 0;
#else
        // ... so for our sanity we loop at the rings tail
        if (++i >= readl((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt)))
        {
            i = 0;
            update = TRUE;
        }
#endif
        next_rxd = E1000_RX_DESC(rx_ring, i);
#if (HAVE_PREFETCH)
        prefetch(next_rxd);
#endif

        D(next_buffer = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];)

        cleaned = TRUE;
        cleaned_count++;

        length = AROS_LE2WORD(rx_desc->length);

        /* !EOP means multiple descriptors were used to store a single
         * packet, also make sure the frame isn't just CRC only */
        if (!(status & E1000_RXD_STAT_EOP) || (length <= ETH_CRCSIZE)) {
            /* All receives must fit into a single buffer */
            D(bug("[%s] %s: Receive packet consumed multiple buffers - recycling\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
            /* recycle */
            goto next_desc;
        }
        frame = (struct eth_frame *)(IPTR)rx_desc->buffer_addr;

        if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
            UBYTE last_byte = *(frame->eth_packet_data + length - 1);
            D(bug("[%s] %s: Frame Error %d (last byte %x)\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_desc->errors, last_byte);)
            if (TBI_ACCEPT((struct e1000_hw *)unit->e1ku_Private00, status,
                           rx_desc->errors, length, last_byte,
                           unit->e1ku_frame_min,
                           unit->e1ku_frame_max))
            {
                D(bug("[%s] %s: TBI accepted\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
                e1000_tbi_adjust_stats_82543((struct e1000_hw *)unit->e1ku_Private00,
                                             unit->e1ku_hw_stats,
                                             length, frame->eth_packet_data,
                                             unit->e1ku_frame_max);

                length--;
            } else {
                /* recycle */
                D(bug("[%s] %s: TBI rejected - recycling\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
                goto next_desc;
            }
        }

        /* got a valid packet - forward it to the network core */
        is_orphan = TRUE;

        /* adjust length to remove Ethernet CRC, this must be
         * done after the TBI_ACCEPT workaround above */
        length -= ETH_CRCSIZE;

        /* probably a little skewed due to removing CRC */
        total_rx_bytes += length;
        total_rx_packets++;

        /* Receive Checksum Offload */
        e1000func_rx_checksum(unit,
                              (ULONG)(status) |
                              ((ULONG)(rx_desc->errors) << 24),
                              AROS_LE2WORD(rx_desc->csum), frame);
        /* Dump contents of frame if DEBUG enabled */
        D({
            int j;
            bug("[%s]: Rx Buffer %d Packet Dump -:", unit->e1ku_name, buffer_no);
            for (j = 0; j < 64; j++) {
                if ((j % 16) == 0)
                {
                    bug("\n[%s]: %03x:", unit->e1ku_name, j);
                }
                bug(" %02x", ((unsigned char*)frame)[j]);
            }
            bug("\n");
        })

        /* Check for address validity */
        if (AddressFilter(LIBBASE, unit, frame->eth_packet_dest))
        {
            D(
                ULONG *framecrc_ptr = (ULONG *)frame->eth_packet_crc;
                bug("[%s] %s: Packet IP accepted with type = %d, checksum = %08x\n", unit->e1ku_name, __PRETTY_FUNCTION__, AROS_BE2WORD(frame->eth_packet_type), AROS_LE2LONG(*framecrc_ptr));
            )
            /* Packet is addressed to this driver */
            opener = (APTR)unit->e1ku_Openers.mlh_Head;
            opener_tail = (APTR)&unit->e1ku_Openers.mlh_Tail;

            /* Offer packet to every opener */
            while (opener != opener_tail)
            {
                request = (APTR)opener->read_port.mp_MsgList.lh_Head;
                request_tail = (APTR)&opener->read_port.mp_MsgList.lh_Tail;
                accepted = FALSE;

                /* Offer packet to each request until it's accepted */
                while ((request != request_tail) && !accepted)
                {
                    if (request->ios2_PacketType == AROS_BE2WORD(frame->eth_packet_type))
                    {
                        D(bug("[%s] %s: copy packet for opener ..\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
                        CopyPacket(LIBBASE, unit, request, length, AROS_BE2WORD(frame->eth_packet_type), frame);
                        accepted = TRUE;
                    }
                    request = (struct IOSana2Req *)request->ios2_Req.io_Message.mn_Node.ln_Succ;
                }

                if (accepted)
                    is_orphan = FALSE;

                opener = (APTR)opener->node.mln_Succ;
            }

            /* If packet was unwanted, give it to S2_READORPHAN request */
            if (is_orphan)
            {
                unit->e1ku_stats.UnknownTypesReceived++;

                if (!IsMsgPortEmpty(unit->e1ku_request_ports[ADOPT_QUEUE]))
                {
                    CopyPacket(LIBBASE, unit,
                               (APTR)unit->e1ku_request_ports[ADOPT_QUEUE]->
                               mp_MsgList.lh_Head, length, AROS_BE2WORD(frame->eth_packet_type), frame);
                    D(bug("[%s] %s: packet copied to orphan queue\n", unit->e1ku_name, __PRETTY_FUNCTION__);)
                }
            }
        }
next_desc:
        rx_desc->status = 0;

        /* use prefetched values */
        rx_desc = next_rxd;
        D(buffer_info = next_buffer;)
    }
    rx_ring->next_to_clean = i;

#if (BROKEN_RX_QUEUE)
    // Enabling this stalls the queue ...
    if ((cleaned_count = E1000_DESC_UNUSED(rx_ring)))
    {
        D(bug("[%s] %s: Updating rdt\n", unit->e1ku_name, __PRETTY_FUNCTION__));
        writel(i, ((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt);
    }
#else
    // ...but it seems we have to tell the hardware to wrap around?
    if (update == TRUE)
    {
        D(bug("[%s] %s: Adjusting RDH/RDT\n", unit->e1ku_name, __PRETTY_FUNCTION__));
        writel(readl(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt), ((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt);
        writel(i, ((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdh);
    }
#endif

    D(
        bug("[%s] %s: Next to clean = %d\n", unit->e1ku_name, __PRETTY_FUNCTION__, rx_ring->next_to_clean);
        bug("[%s] %s: RDH <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, readl(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdh));
        bug("[%s] %s: RDT <%x>\n", unit->e1ku_name, __PRETTY_FUNCTION__, readl(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt));
    )

    unit->e1ku_stats.PacketsReceived += total_rx_packets;
//    adapter->total_rx_packets += total_rx_packets;
//    adapter->total_rx_bytes += total_rx_bytes;
    D(bug("[%s] %s: Received %d packets (%d bytes)\n", unit->e1ku_name, __PRETTY_FUNCTION__, total_rx_packets, total_rx_bytes);)

    return cleaned;
}
/** OS SUPPORT CALLS FOR INTEL CODE **/

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;

    D(bug("[%s]: %s()\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__);)

    /* Check if the device's cache line size is set first? */
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pciwritemsg.reg = 0x04;
    pciwritemsg.val = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg) & ~0x0010;
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg);
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;

    D(bug("[%s]: %s()\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__);)

    /* Check if the device's cache line size is set first? */
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pciwritemsg.reg = 0x04;
    pciwritemsg.val = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg) | 0x0010;
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg);
}
LONG e1000_read_pcie_cap_reg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_ReadConfigWord pcireadmsg;

    D(bug("[%s]: %s(reg:%d)\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, reg);)

    if (((struct e1000Unit *)hw->back)->e1ku_PCIeCap)
    {
        pcireadmsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
        pcireadmsg.reg = ((struct e1000Unit *)hw->back)->e1ku_PCIeCap + reg;
        *value = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pcireadmsg);
        D(bug("[%s] %s: ------> [%04x]\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, *value);)
        return (E1000_SUCCESS);
    }

    return 0;
}

void e1000_read_pci_cfg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_ReadConfigWord pcireadmsg;

    D(bug("[%s]: %s(reg:%d)\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, reg);)

    pcireadmsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pcireadmsg.reg = reg;
    *value = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pcireadmsg);
    D(bug("[%s] %s: ------> [%04x]\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, *value);)
}

void e1000_write_pci_cfg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;

    D(bug("[%s]: %s(reg:%d, %04x)\n", ((struct e1000Unit *)hw->back)->e1ku_name, __PRETTY_FUNCTION__, reg, *value);)

    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    pciwritemsg.reg = reg;
    pciwritemsg.val = *value;
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, (OOP_Msg)&pciwritemsg);
}