/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston,
    MA 02111-1307, USA.
*/
#include <exec/types.h>
#include <exec/resident.h>
#include <exec/ports.h>

#include <aros/libcall.h>
#include <aros/macros.h>

#include <devices/sana2.h>
#include <devices/sana2specialstats.h>

#include <utility/utility.h>
#include <utility/tagitem.h>
#include <utility/hooks.h>

#include <proto/oop.h>
#include <proto/exec.h>
#include <proto/dos.h>
#include <proto/battclock.h>

#include "e1000_osdep.h"
#include "e1000_defines.h"
#include "e1000_api.h"

#include LC_LIBDEFS_FILE
/* A bit of fixed-up Linux code follows :) */

#define LIBBASE (unit->e1ku_device)
void e1000_usec_delay(struct net_device *unit, ULONG usec)
{
    unit->e1ku_DelayPort.mp_SigTask = FindTask(NULL);
    unit->e1ku_DelayReq.tr_node.io_Command = TR_ADDREQUEST;
    unit->e1ku_DelayReq.tr_time.tv_micro = usec % 1000000;
    unit->e1ku_DelayReq.tr_time.tv_secs = usec / 1000000;

    DoIO((struct IORequest *)&unit->e1ku_DelayReq);
}
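
/*
 * Note: the delay above blocks inside DoIO() on timer.device, so it must
 * only be used from Task context; the millisecond wrappers below simply
 * scale their argument to microseconds.
 */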
void e1000_msec_delay(struct net_device *unit, ULONG msec)
{
    e1000_usec_delay(unit, 1000 * msec);
}
void e1000_msec_delay_irq(struct net_device *unit, ULONG msec)
{
    e1000_usec_delay(unit, 1000 * msec);
}
void MMIO_W8(APTR addr, UBYTE val8)
{
    *((volatile UBYTE *)(addr)) = (val8);
}
void MMIO_W16(APTR addr, UWORD val16)
{
    UWORD tmp;

    *((volatile UWORD *)(addr)) = (val16);

    tmp = MMIO_R16(addr);
}
void MMIO_W32(APTR addr, ULONG val32)
{
    ULONG tmp;

    *((volatile ULONG *)(addr)) = (val32);

    tmp = MMIO_R32(addr);
}
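
/*
 * The read-back through MMIO_R16()/MMIO_R32() after each store forces the
 * write out of any posted-write buffers before the caller proceeds; the
 * value read is deliberately discarded.
 */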
static BOOL e1000func_check_64k_bound(struct net_device *unit,
                                      void *start, unsigned long len)
{
    unsigned long begin = (unsigned long) start;
    unsigned long end = begin + len;

    /* First rev 82545 and 82546 need to not allow any memory
     * write location to cross 64k boundary due to errata 23 */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82545 ||
        ((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82546) {
        return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
    }

    return TRUE;
}
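
/*
 * Worked example (illustrative): a 16 KB buffer at 0x0000F000 ends at
 * 0x00012FFF; begin ^ (end - 1) = 0x0001DFFF, which is non-zero after
 * shifting right by 16, so the buffer straddles a 64 KB boundary and the
 * check returns FALSE on the affected parts.
 */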
void e1000func_irq_disable(struct net_device *unit)
{
    ULONG tmp;

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_IMC, ~0);
    tmp = E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);
}
void e1000func_irq_enable(struct net_device *unit)
{
    ULONG tmp;

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_IMS, IMS_ENABLE_MASK);
    tmp = E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);
}
static void e1000func_enter_82542_rst(struct net_device *unit)
{
    ULONG rctl, tmp;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type != e1000_82542)
        return;
    if (((struct e1000_hw *)unit->e1ku_Private00)->revision_id != E1000_REVISION_2)
        return;

    e1000_pci_clear_mwi((struct e1000_hw *)unit->e1ku_Private00);

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    rctl |= E1000_RCTL_RST;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
    tmp = E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 5);

    // if (netif_running(netdev))
    //     e1000_clean_all_rx_rings(adapter);
}
static void e1000func_leave_82542_rst(struct net_device *unit)
{
    ULONG rctl, tmp;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type != e1000_82542)
        return;
    if (((struct e1000_hw *)unit->e1ku_Private00)->revision_id != E1000_REVISION_2)
        return;

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    rctl &= ~E1000_RCTL_RST;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
    tmp = E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 5);

    if (((struct e1000_hw *)unit->e1ku_Private00)->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
        e1000_pci_set_mwi((struct e1000_hw *)unit->e1ku_Private00);

    // if (netif_running(netdev)) {
    //     /* No need to loop, because 82542 supports only 1 queue */
    //     struct e1000_rx_ring *ring = &adapter->rx_ring[0];
    //     e1000_configure_rx(adapter);
    //     adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
    // }
}
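
/*
 * Usage sketch: on 82542 rev 2.0 the receive address registers can only be
 * written while the receiver is held in reset, so callers bracket such
 * updates with this pair, as e1000func_set_mac() does further below:
 *
 *     e1000func_enter_82542_rst(unit);
 *     e1000_rar_set(hw, hw->mac.addr, 0);
 *     e1000func_leave_82542_rst(unit);
 */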
static void e1000func_configure_tx(struct net_device *unit)
{
    UQUAD tdba;
    ULONG tdlen, tctl, tipg;
    ULONG ipgr1, ipgr2;
    int i;

    D(bug("[%s]: e1000func_configure_tx(unit @ %p)\n", unit->e1ku_name, unit));

    /* Setup the HW Tx Head and Tail descriptor pointers */
    for (i = 0; i < unit->e1ku_txRing_QueueSize; i++)
    {
        D(bug("[%s]: e1000func_configure_tx: Tx Queue %d @ %p)\n", unit->e1ku_name, i, &unit->e1ku_txRing[i]));
        D(bug("[%s]: e1000func_configure_tx: Tx Queue count = %d)\n", unit->e1ku_name, unit->e1ku_txRing[i].count));
        tdba = (UQUAD)unit->e1ku_txRing[i].dma;
        tdlen = (ULONG)(unit->e1ku_txRing[i].count * sizeof(struct e1000_tx_desc));
        D(bug("[%s]: e1000func_configure_tx: Tx Queue Ring Descriptor DMA @ %p [%d bytes]\n", unit->e1ku_name, unit->e1ku_txRing[i].dma, tdlen));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDBAL(i), (ULONG)(tdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDBAH(i), (ULONG)(tdba >> 32));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDLEN(i), tdlen);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDH(i), 0);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TDT(i), 0);
        unit->e1ku_txRing[i].tdh = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_TDH(i));
        unit->e1ku_txRing[i].tdt = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_TDT(i));
        D(bug("[%s]: e1000func_configure_tx: Tx Queue TDH=%d, TDT=%d\n", unit->e1ku_name, unit->e1ku_txRing[i].tdh, unit->e1ku_txRing[i].tdt));
    }

    /* Set the default values for the Tx Inter Packet Gap timer */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type <= e1000_82547_rev_2 &&
        (((struct e1000_hw *)unit->e1ku_Private00)->phy.media_type == e1000_media_type_fiber ||
        ((struct e1000_hw *)unit->e1ku_Private00)->phy.media_type == e1000_media_type_internal_serdes))
        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
    else
        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

    switch (((struct e1000_hw *)unit->e1ku_Private00)->mac.type) {
    case e1000_82542:
        tipg = DEFAULT_82542_TIPG_IPGT;
        ipgr1 = DEFAULT_82542_TIPG_IPGR1;
        ipgr2 = DEFAULT_82542_TIPG_IPGR2;
        break;
    default:
        ipgr1 = DEFAULT_82543_TIPG_IPGR1;
        ipgr2 = DEFAULT_82543_TIPG_IPGR2;
        break;
    }
    tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
    tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TIPG, tipg);

    /* Set the Tx Interrupt Delay register */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TIDV, 0);
    // if (unit->flags & E1000_FLAG_HAS_INTR_MODERATION)
    //     E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TADV, unit->tx_abs_int_delay);

    /* Program the Transmit Control Register */
    tctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TCTL);
    tctl &= ~E1000_TCTL_CT;
    tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
            (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

    e1000_config_collision_dist((struct e1000_hw *)unit->e1ku_Private00);

    /* Setup Transmit Descriptor Settings for eop descriptor */
    unit->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

    /* only set IDE if we are delaying interrupts using the timers */
    // if (unit->tx_int_delay)
    //     unit->txd_cmd |= E1000_TXD_CMD_IDE;

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type < e1000_82543)
        unit->txd_cmd |= E1000_TXD_CMD_RPS;
    else
        unit->txd_cmd |= E1000_TXD_CMD_RS;

    /* Cache if we're 82544 running in PCI-X because we'll
     * need this to apply a workaround later in the send path. */
    // if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82544 &&
    //     ((struct e1000_hw *)unit->e1ku_Private00)->bus.type == e1000_bus_type_pcix)
    //     adapter->pcix_82544 = 1;

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_TCTL, tctl);
}
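
/*
 * TIPG packs three inter-packet-gap fields into one register: IPGT in the
 * low bits, IPGR1 at E1000_TIPG_IPGR1_SHIFT (bits 19:10 on these parts) and
 * IPGR2 at E1000_TIPG_IPGR2_SHIFT (bits 29:20), hence the two shifted ORs
 * above.
 */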
static void e1000func_setup_rctl(struct net_device *unit)
{
    ULONG rctl;

    D(bug("[%s]: e1000func_setup_rctl()\n", unit->e1ku_name));

    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);

    rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

    rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
            (((struct e1000_hw *)unit->e1ku_Private00)->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

    /* disable the stripping of CRC because it breaks
     * BMC firmware connected over SMBUS
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type > e1000_82543)
        rctl |= E1000_RCTL_SECRC;
    */

    if (e1000_tbi_sbp_enabled_82543((struct e1000_hw *)unit->e1ku_Private00))
        rctl |= E1000_RCTL_SBP;
    else
        rctl &= ~E1000_RCTL_SBP;

    if (unit->e1ku_mtu <= ETH_DATA_LEN)
        rctl &= ~E1000_RCTL_LPE;
    else
        rctl |= E1000_RCTL_LPE;

    /* Setup buffer sizes */
    rctl &= ~E1000_RCTL_SZ_4096;
    rctl |= E1000_RCTL_BSEX;
    switch (unit->rx_buffer_len) {
    case E1000_RXBUFFER_256:
        rctl |= E1000_RCTL_SZ_256;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_512:
        rctl |= E1000_RCTL_SZ_512;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_1024:
        rctl |= E1000_RCTL_SZ_1024;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_2048:
    default:
        rctl |= E1000_RCTL_SZ_2048;
        rctl &= ~E1000_RCTL_BSEX;
        break;
    case E1000_RXBUFFER_4096:
        rctl |= E1000_RCTL_SZ_4096;
        break;
    case E1000_RXBUFFER_8192:
        rctl |= E1000_RCTL_SZ_8192;
        break;
    case E1000_RXBUFFER_16384:
        rctl |= E1000_RCTL_SZ_16384;
        break;
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
}
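
/*
 * The receive buffer-size encoding is two-level: with BSEX clear the SZ
 * field selects 256-2048 bytes, and with BSEX set the same field values are
 * multiplied by 16 (4096-16384). That is why the small sizes clear BSEX
 * again after it is set as the default above.
 */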
static void e1000func_configure_rx(struct net_device *unit)
{
    UQUAD rdba;
    ULONG rdlen, rctl, rxcsum;
    ULONG tmp;
    int i;

    D(bug("[%s]: e1000func_configure_rx()\n", unit->e1ku_name));

    /* disable receivers while setting up the descriptors */
    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl & ~E1000_RCTL_EN);
    tmp = E1000_WRITE_FLUSH((struct e1000_hw *)unit->e1ku_Private00);

    e1000_msec_delay(unit, 10);

    /* set the Receive Delay Timer Register */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDTR, 0);

    // if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) {
    //     E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RADV, adapter->rx_abs_int_delay);
    //     if (adapter->itr_setting != 0)
    //         E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_ITR,
    //                         1000000000 / (adapter->itr * 256));
    // }

    /* Setup the HW Rx Head and Tail Descriptor Pointers and
     * the Base and Length of the Rx Descriptor Ring */
    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++)
    {
        D(bug("[%s]: e1000func_configure_rx: Rx Queue %d @ %p)\n", unit->e1ku_name, i, &unit->e1ku_rxRing[i]));
        D(bug("[%s]: e1000func_configure_rx: Rx Queue count = %d)\n", unit->e1ku_name, unit->e1ku_rxRing[i].count));
        rdlen = (ULONG)(unit->e1ku_rxRing[i].count * sizeof(struct e1000_rx_desc));
        rdba = (UQUAD)unit->e1ku_rxRing[i].dma;
        D(bug("[%s]: e1000func_configure_rx: Rx Queue Ring Descriptor DMA @ %p, [%d bytes]\n", unit->e1ku_name, unit->e1ku_rxRing[i].dma, rdlen));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDBAL(i), (ULONG)(rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDBAH(i), (ULONG)(rdba >> 32));
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDLEN(i), rdlen);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDH(i), 0);
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RDT(i), 0);
        unit->e1ku_rxRing[i].rdh = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_RDH(i));
        unit->e1ku_rxRing[i].rdt = E1000_REGISTER((struct e1000_hw *)unit->e1ku_Private00, E1000_RDT(i));
        D(bug("[%s]: e1000func_configure_rx: Rx Queue RDH=%d, RDT=%d\n", unit->e1ku_name, unit->e1ku_rxRing[i].rdh, unit->e1ku_rxRing[i].rdt));
    }

    D(bug("[%s]: e1000func_configure_rx: Configuring checksum Offload..\n", unit->e1ku_name));
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type >= e1000_82543) {
        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        rxcsum = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RXCSUM);
        // if (unit->rx_csum == TRUE) {
        //     rxcsum |= E1000_RXCSUM_TUOFL;
        // } else {
        rxcsum &= ~E1000_RXCSUM_TUOFL;
        /* don't need to clear IPPCSE as it defaults to 0 */
        // }
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RXCSUM, rxcsum);
    }

    /* Enable Receivers */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);
}
void e1000func_reset(struct net_device *unit)
{
    struct e1000_mac_info *mac = &((struct e1000_hw *)unit->e1ku_Private00)->mac;
    struct e1000_fc_info *fc = &((struct e1000_hw *)unit->e1ku_Private00)->fc;
    u32 pba = 0, tx_space, min_tx_space, min_rx_space;
    u32 hwm;
    bool legacy_pba_adjust = FALSE;

    D(bug("[%s]: e1000func_reset()\n", unit->e1ku_name));

    /* Repartition Pba for greater than 9k mtu
     * To take effect CTRL.RST is required.
     */
    switch (mac->type) {
    case e1000_82541_rev_2:
        legacy_pba_adjust = TRUE;
        pba = E1000_PBA_48K;
        break;
    case e1000_82545_rev_3:
    case e1000_82546_rev_3:
        pba = E1000_PBA_48K;
        break;
    case e1000_82547_rev_2:
        legacy_pba_adjust = TRUE;
        pba = E1000_PBA_30K;
        break;
    case e1000_undefined:
    default:
        break;
    }

    if (legacy_pba_adjust == TRUE) {
        if (unit->e1ku_frame_max > E1000_RXBUFFER_8192)
            pba -= 8; /* allocate more FIFO for Tx */

        if (mac->type == e1000_82547) {
            unit->e1ku_tx_fifo_head = 0;
            unit->e1ku_tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
            unit->e1ku_tx_fifo_size =
                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
            // atomic_set(&unit->tx_fifo_stall, 0);
        }
    } else if (unit->e1ku_frame_max > ETH_MAXPACKETSIZE) {
        /* adjust PBA for jumbo frames */
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA, pba);

        /* To maintain wire speed transmits, the Tx FIFO should be
         * large enough to accommodate two full transmit packets,
         * rounded up to the next 1KB and expressed in KB. Likewise,
         * the Rx FIFO should be large enough to accommodate at least
         * one full receive packet and is similarly rounded up and
         * expressed in KB. */
        pba = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA);
        /* upper 16 bits has Tx packet buffer allocation size in KB */
        tx_space = pba >> 16;
        /* lower 16 bits has Rx packet buffer allocation size in KB */
        pba &= 0xffff;
        /* the tx fifo also stores 16 bytes of information about the tx
         * but don't include ethernet FCS because hardware appends it */
        min_tx_space = (unit->e1ku_frame_max +
                        sizeof(struct e1000_tx_desc) -
                        ETH_CRCSIZE) * 2;
        min_tx_space = ALIGN(min_tx_space, 1024);
        min_tx_space >>= 10;
        /* software strips receive CRC, so leave room for it */
        min_rx_space = unit->e1ku_frame_max;
        min_rx_space = ALIGN(min_rx_space, 1024);
        min_rx_space >>= 10;

        /* If current Tx allocation is less than the min Tx FIFO size,
         * and the min Tx FIFO size is less than the current Rx FIFO
         * allocation, take space away from current Rx allocation */
        if (tx_space < min_tx_space &&
            ((min_tx_space - tx_space) < pba)) {
            pba = pba - (min_tx_space - tx_space);

            /* PCI/PCIx hardware has PBA alignment constraints */
            switch (mac->type) {
            case e1000_82545 ... e1000_82546_rev_3:
                pba &= ~(E1000_PBA_8K - 1);
                break;
            default:
                break;
            }

            /* if short on rx space, rx wins and must trump tx
             * adjustment or use Early Receive if available */
            if (pba < min_rx_space) {
                pba = min_rx_space;
            }
        }
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_PBA, pba);

    /* flow control settings */
    /* The high water mark must be low enough to fit one full frame
     * (or the size used for early receive) above it in the Rx FIFO.
     * Set it to the lower of:
     * - 90% of the Rx FIFO size, and
     * - the full Rx FIFO size minus the early receive size (for parts
     *   with ERT support assuming ERT set to E1000_ERT_2048), or
     * - the full Rx FIFO size minus one full frame */
    hwm = min(((pba << 10) * 9 / 10),
              ((pba << 10) - unit->e1ku_frame_max));

    fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
    fc->low_water = fc->high_water - 8;

    fc->pause_time = E1000_FC_PAUSE_TIME;
    fc->send_xon = 1;
    fc->type = fc->original_type;

    /* Allow time for pending master requests to run */
    e1000_reset_hw((struct e1000_hw *)unit->e1ku_Private00);

    if (mac->type >= e1000_82544)
    {
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_WUC, 0);
    }

    if (e1000_init_hw((struct e1000_hw *)unit->e1ku_Private00))
    {
        D(bug("[%s]: e1000func_reset: Hardware Error\n", unit->e1ku_name));
    }

    /* if (unit->hwflags & HWFLAGS_PHY_PWR_BIT) { */
    if (mac->type >= e1000_82544 &&
        mac->type <= e1000_82547_rev_2 &&
        mac->autoneg &&
        ((struct e1000_hw *)unit->e1ku_Private00)->phy.autoneg_advertised == ADVERTISE_1000_FULL) {
        u32 ctrl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_CTRL);
        /* clear phy power management bit if we are in gig only mode,
         * which if enabled will attempt negotiation to 100Mb, which
         * can cause a loss of link at power off or driver unload */
        ctrl &= ~E1000_CTRL_SWDPIN3;
        E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_CTRL, ctrl);
    }

    /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

    e1000_reset_adaptive((struct e1000_hw *)unit->e1ku_Private00);
    e1000_get_phy_info((struct e1000_hw *)unit->e1ku_Private00);
}
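
/*
 * Worked example (illustrative): with pba = 48 (E1000_PBA_48K, in KB) and a
 * 1522-byte maximum frame, (pba << 10) = 49152; 90% of that is 44236 while
 * 49152 - 1522 = 47630, so hwm = 44236 and fc->high_water = 44236 & 0xFFF8
 * = 44232, with fc->low_water sitting 8 bytes below it.
 */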
int e1000func_set_mac(struct net_device *unit)
{
    D(bug("[%s]: e1000func_set_mac()\n", unit->e1ku_name));

    /* 82542 2.0 needs to be in reset to write receive address registers */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
    {
        e1000func_enter_82542_rst(unit);
    }

    memcpy(((struct e1000_hw *)unit->e1ku_Private00)->mac.addr, unit->e1ku_dev_addr, ETH_ADDRESSSIZE);

    e1000_rar_set((struct e1000_hw *)unit->e1ku_Private00, ((struct e1000_hw *)unit->e1ku_Private00)->mac.addr, 0);

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
    {
        e1000func_leave_82542_rst(unit);
    }

    return 0;
}
void e1000func_set_multi(struct net_device *unit)
{
    struct e1000_mac_info *mac = &((struct e1000_hw *)unit->e1ku_Private00)->mac;
    struct AddressRange *range;
    UBYTE *mta_list;
    ULONG rctl, mc_count;
    int i = 0;

    D(bug("[%s]: e1000func_set_multi()\n", unit->e1ku_name));

    /* Check for Promiscuous and All Multicast modes */
    rctl = E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL);

    if (unit->e1ku_ifflags & IFF_PROMISC) {
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
    } else if (unit->e1ku_ifflags & IFF_ALLMULTI) {
        rctl |= E1000_RCTL_MPE;
        rctl &= ~E1000_RCTL_UPE;
    } else {
        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
    }

    E1000_WRITE_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_RCTL, rctl);

    /* 82542 2.0 needs to be in reset to write receive address registers */
    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
        e1000func_enter_82542_rst(unit);

    ListLength(&unit->e1ku_multicast_ranges, mc_count);

    if (mc_count > 0)
    {
        mta_list = AllocMem(mc_count * ETH_ADDRESSSIZE, MEMF_PUBLIC | MEMF_CLEAR);
        if (mta_list != NULL)
        {
            /* The shared function expects a packed array of only addresses. */
            ForeachNode(&unit->e1ku_multicast_ranges, range) {
                memcpy(mta_list + (i * ETH_ADDRESSSIZE), &range->lower_bound_left, ETH_ADDRESSSIZE);
                i++;
            }

            e1000_update_mc_addr_list((struct e1000_hw *)unit->e1ku_Private00, mta_list, i, 1, mac->rar_entry_count);

            FreeMem(mta_list, mc_count * ETH_ADDRESSSIZE);
        }
    }

    if (((struct e1000_hw *)unit->e1ku_Private00)->mac.type == e1000_82542)
        e1000func_leave_82542_rst(unit);
}
static void e1000func_deinitialize(struct net_device *unit)
{
}
int request_irq(struct net_device *unit)
{
    OOP_Object *irq = OOP_NewObject(NULL, CLID_Hidd_IRQ, NULL);
    BOOL ret;

    D(bug("[%s]: request_irq()\n", unit->e1ku_name));

    if (irq)
    {
        ret = HIDD_IRQ_AddHandler(irq, unit->e1ku_irqhandler, unit->e1ku_IRQ);
        HIDD_IRQ_AddHandler(irq, unit->e1ku_touthandler, vHidd_IRQ_Timer);

        D(bug("[%s]: request_irq: IRQ Handlers configured\n", unit->e1ku_name));

        OOP_DisposeObject(irq);

        if (ret)
        {
            return 0;
        }
    }
    return -1;
}
static void free_irq(struct net_device *unit)
{
    OOP_Object *irq = OOP_NewObject(NULL, CLID_Hidd_IRQ, NULL);

    if (irq)
    {
        HIDD_IRQ_RemHandler(irq, unit->e1ku_irqhandler);
        HIDD_IRQ_RemHandler(irq, unit->e1ku_touthandler);
        OOP_DisposeObject(irq);
    }
}
static int e1000func_setup_tx_resources(struct net_device *unit,
                                        struct e1000_tx_ring *tx_ring)
{
    ULONG size;

    D(bug("[%s]: e1000func_setup_tx_resources()\n", unit->e1ku_name));

    size = sizeof(struct e1000_buffer) * tx_ring->count;

    D(bug("[%s]: e1000func_setup_tx_resources: Configuring for %d buffers\n", unit->e1ku_name, tx_ring->count));

    if ((tx_ring->buffer_info = AllocMem(size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
        D(bug("[%s]: e1000func_setup_tx_resources: Unable to allocate memory for the transmit descriptor ring\n", unit->e1ku_name));
        return -E1000_ERR_CONFIG;
    }

    D(bug("[%s]: e1000func_setup_tx_resources: Tx Buffer Info @ %p [%d bytes]\n", unit->e1ku_name, tx_ring->buffer_info, size));

    /* round up to nearest 4K */
    tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
    tx_ring->size = ALIGN(tx_ring->size, 4096);

    if ((tx_ring->desc = AllocMem(tx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
setup_tx_desc_die:
        FreeMem(tx_ring->buffer_info, size);
        D(bug("[%s]: e1000func_setup_tx_resources: Unable to allocate memory for the transmit descriptor ring\n", unit->e1ku_name));
        return -E1000_ERR_CONFIG;
    }
    tx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)tx_ring->desc);

    /* Fix for errata 23, can't cross 64kB boundary */
    if (!e1000func_check_64k_bound(unit, tx_ring->desc, tx_ring->size)) {
        void *olddesc = tx_ring->desc;
        D(bug("[%s]: e1000func_setup_tx_resources: tx_ring align check failed: %u bytes at %p\n", unit->e1ku_name, tx_ring->size, tx_ring->desc));
        /* Try again, without freeing the previous */
        if ((tx_ring->desc = AllocMem(tx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
            /* Failed allocation, critical failure */
            FreeMem(olddesc, tx_ring->size);

            goto setup_tx_desc_die;
        }
        tx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)tx_ring->desc);

        if (!e1000func_check_64k_bound(unit, tx_ring->desc,
                                       tx_ring->size)) {
            /* give up */
            FreeMem(tx_ring->desc, tx_ring->size);
            FreeMem(olddesc, tx_ring->size);

            D(bug("[%s]: e1000func_setup_tx_resources: Unable to allocate aligned memory for the transmit descriptor ring\n", unit->e1ku_name));
            FreeMem(tx_ring->buffer_info, size);
            return -E1000_ERR_CONFIG;
        }
        /* Free old allocation, new allocation was successful */
        FreeMem(olddesc, tx_ring->size);
    }

    D(bug("[%s]: e1000func_setup_tx_resources: Tx Ring Descriptors @ %p [%d bytes]\n", unit->e1ku_name, tx_ring->desc, tx_ring->size));

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;

    return 0;
}
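
/*
 * Note on the errata-23 retry above: the first, misaligned descriptor block
 * is deliberately kept allocated while the second AllocMem() runs, so the
 * allocator cannot hand back the same region; the old block is only freed
 * once the replacement has passed (or finally failed) the 64 kB check.
 */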
int e1000func_setup_all_tx_resources(struct net_device *unit)
{
    int i, err = 0;

    for (i = 0; i < unit->e1ku_txRing_QueueSize; i++) {
        err = e1000func_setup_tx_resources(unit, &unit->e1ku_txRing[i]);
        if (err) {
            D(bug("[%s]: e1000func_setup_all_tx_resources: Allocation for Tx Queue %u failed\n", unit->e1ku_name, i));
            for (i-- ; i >= 0; i--)
                e1000func_free_tx_resources(unit,
                                            &unit->e1ku_txRing[i]);
            break;
        }
    }

    return err;
}
static int e1000func_setup_rx_resources(struct net_device *unit,
                                        struct e1000_rx_ring *rx_ring)
{
    ULONG buffer_size;

    D(bug("[%s]: e1000func_setup_rx_resources()\n", unit->e1ku_name));

    buffer_size = sizeof(struct e1000_rx_buffer) * rx_ring->count;

    D(bug("[%s]: e1000func_setup_rx_resources: Configuring for %d buffers\n", unit->e1ku_name, rx_ring->count));

    if ((rx_ring->buffer_info = AllocMem(buffer_size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
        D(bug("[%s]: e1000func_setup_rx_resources: Unable to allocate memory for the receive ring buffers\n", unit->e1ku_name));
        return -E1000_ERR_CONFIG;
    }

    D(bug("[%s]: e1000func_setup_rx_resources: Rx Buffer Info @ %p [%d bytes]\n", unit->e1ku_name, rx_ring->buffer_info, buffer_size));

    /* Round up to nearest 4K */
    rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
    rx_ring->size = ALIGN(rx_ring->size, 4096);

    if ((rx_ring->desc = AllocMem(rx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
        D(bug("[%s]: e1000func_setup_rx_resources: Unable to allocate memory for the receive ring descriptors\n", unit->e1ku_name));
setup_rx_desc_die:
        FreeMem(rx_ring->buffer_info, buffer_size);
        return -E1000_ERR_CONFIG;
    }
    rx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)rx_ring->desc);

    /* Fix for errata 23, can't cross 64kB boundary */
    if (!e1000func_check_64k_bound(unit, rx_ring->desc, rx_ring->size)) {
        void *olddesc = rx_ring->desc;
        D(bug("[%s]: e1000func_setup_rx_resources: rx_ring align check failed: %u bytes at %p\n", unit->e1ku_name, rx_ring->size, rx_ring->desc));

        /* Try again, without freeing the previous */
        if ((rx_ring->desc = AllocMem(rx_ring->size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL) {
            /* Failed allocation, critical failure */
            FreeMem(olddesc, rx_ring->size);

            D(bug("[%s]: e1000func_setup_rx_resources: Unable to allocate memory for the receive descriptor ring\n", unit->e1ku_name));
            goto setup_rx_desc_die;
        }
        rx_ring->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)rx_ring->desc);

        if (!e1000func_check_64k_bound(unit, rx_ring->desc,
                                       rx_ring->size)) {
            /* give up */
            FreeMem(rx_ring->desc, rx_ring->size);
            FreeMem(olddesc, rx_ring->size);

            D(bug("[%s]: e1000func_setup_rx_resources: Unable to allocate aligned memory for the receive descriptor ring\n", unit->e1ku_name));
            goto setup_rx_desc_die;
        }
        /* Free old allocation, new allocation was successful */
        FreeMem(olddesc, rx_ring->size);
    }

    D(bug("[%s]: e1000func_setup_rx_resources: Rx Ring Descriptors @ %p [%d bytes]\n", unit->e1ku_name, rx_ring->desc, rx_ring->size));

    /* set up ring defaults */
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    return 0;
}
int e1000func_setup_all_rx_resources(struct net_device *unit)
{
    int i, err = 0;

    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++) {
        err = e1000func_setup_rx_resources(unit, &unit->e1ku_rxRing[i]);
        if (err) {
            D(bug("[%s]: e1000func_setup_all_rx_resources: Allocation for Rx Queue %u failed\n", unit->e1ku_name, i));
            for (i-- ; i >= 0; i--)
                e1000func_free_rx_resources(unit,
                                            &unit->e1ku_rxRing[i]);
            break;
        }
    }

    return err;
}
void e1000func_unmap_and_free_tx_resource(struct net_device *unit,
                                          struct e1000_buffer *buffer_info)
{
    if (buffer_info->dma) {
        buffer_info->dma = NULL;
    }
    if (buffer_info->buffer) {
        FreeMem(buffer_info->buffer, ETH_MAXPACKETSIZE);
        buffer_info->buffer = NULL;
    }
    /* buffer_info must be completely set up in the transmit path */
}
void e1000func_clean_tx_ring(struct net_device *unit,
                             struct e1000_tx_ring *tx_ring)
{
    struct e1000_buffer *buffer_info;
    ULONG size;
    unsigned int i;

    D(bug("[%s]: e1000func_clean_tx_ring()\n", unit->e1ku_name));

    /* Free all the Tx ring buffers */
    for (i = 0; i < tx_ring->count; i++) {
        buffer_info = &tx_ring->buffer_info[i];
        e1000func_unmap_and_free_tx_resource(unit, buffer_info);
    }

    size = sizeof(struct e1000_buffer) * tx_ring->count;
    memset(tx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(tx_ring->desc, 0, tx_ring->size);

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
    // tx_ring->last_tx_tso = 0;

    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdh), 0);
    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdt), 0);
}
void e1000func_free_tx_resources(struct net_device *unit,
                                 struct e1000_tx_ring *tx_ring)
{
    D(bug("[%s]: e1000func_free_tx_resources()\n", unit->e1ku_name));

    e1000func_clean_tx_ring(unit, tx_ring);

    FreeMem(tx_ring->buffer_info, sizeof(struct e1000_buffer) * tx_ring->count);
    tx_ring->buffer_info = NULL;

    FreeMem(tx_ring->desc, tx_ring->size);
    tx_ring->dma = tx_ring->desc = NULL;
}
void e1000func_clean_rx_ring(struct net_device *unit,
                             struct e1000_rx_ring *rx_ring)
{
    struct e1000_rx_buffer *buffer_info;
    ULONG size;
    unsigned int i;

    D(bug("[%s]: e1000func_clean_rx_ring()\n", unit->e1ku_name));

    /* Free all the Rx ring buffers */
    for (i = 0; i < rx_ring->count; i++) {
        buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];
        if (buffer_info->dma != NULL) {
            buffer_info->dma = NULL;
        }
        if (buffer_info->buffer)
        {
            FreeMem(buffer_info->buffer, unit->rx_buffer_len);
            buffer_info->buffer = NULL;
        }
    }

    size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
    memset(rx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdh), 0);
    MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt), 0);
}
void e1000func_free_rx_resources(struct net_device *unit,
                                 struct e1000_rx_ring *rx_ring)
{
    D(bug("[%s]: e1000func_free_rx_resources()\n", unit->e1ku_name));

    e1000func_clean_rx_ring(unit, rx_ring);

    FreeMem(rx_ring->buffer_info, sizeof(struct e1000_rx_buffer) * rx_ring->count);
    rx_ring->buffer_info = NULL;

    FreeMem(rx_ring->desc, rx_ring->size);
    rx_ring->dma = rx_ring->desc = NULL;
}
static int e1000func_close(struct net_device *unit)
{
    unit->e1ku_ifflags &= ~IFF_UP;

    // ObtainSemaphore(&np->lock);
    // np->in_shutdown = 1;
    // ReleaseSemaphore(&np->lock);

    unit->e1ku_toutNEED = FALSE;

    // netif_stop_queue(unit);
    // ObtainSemaphore(&np->lock);

    // e1000func_deinitialize(unit); // Stop the chipset and set it in 16bit-mode

    // ReleaseSemaphore(&np->lock);

    // HIDD_PCIDriver_FreePCIMem(unit->e1ku_PCIDriver, np->rx_buffer);
    // HIDD_PCIDriver_FreePCIMem(unit->e1ku_PCIDriver, np->tx_buffer);

    ReportEvents(LIBBASE, unit, S2EVENT_OFFLINE);

    return 0;
}
void e1000func_alloc_rx_buffers(struct net_device *unit,
                                struct e1000_rx_ring *rx_ring,
                                int cleaned_count)
{
    struct e1000_rx_desc *rx_desc;
    struct e1000_rx_buffer *buffer_info;
    unsigned int i;

    i = rx_ring->next_to_use;
    buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];

    while (cleaned_count--) {
        if ((buffer_info->buffer = AllocMem(unit->rx_buffer_len, MEMF_PUBLIC|MEMF_CLEAR)) != NULL)
        {
            D(bug("[%s]: e1000func_alloc_rx_buffers: Buffer %d Allocated @ %p [%d bytes]\n", unit->e1ku_name, i, buffer_info->buffer, unit->rx_buffer_len));
            if ((buffer_info->dma = HIDD_PCIDriver_CPUtoPCI(unit->e1ku_PCIDriver, (APTR)buffer_info->buffer)) == NULL)
            {
                D(bug("[%s]: e1000func_alloc_rx_buffers: Failed to Map Buffer %d for DMA!!\n", unit->e1ku_name, i));
            }
            D(bug("[%s]: e1000func_alloc_rx_buffers: Buffer %d DMA @ %p\n", unit->e1ku_name, i, buffer_info->dma));

            rx_desc = E1000_RX_DESC(rx_ring, i);
            // rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
            rx_desc->buffer_addr = (UQUAD)buffer_info->dma;
        }

        if (++i == rx_ring->count)
            i = 0;
        buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];
    }

    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        if (i-- == 0)
            i = (rx_ring->count - 1);

        MMIO_W32((APTR)(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt), i);
    }
}
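
/*
 * The tail is written as the index just *before* next_to_use (the i-- with
 * wrap above): hardware may fetch descriptors up to the one at RDT, so
 * staying one slot short keeps head and tail from becoming equal while
 * buffers are outstanding, which would be ambiguous between a full and an
 * empty ring.
 */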
void e1000func_configure(struct net_device *unit)
{
    int i;

    D(bug("[%s]: e1000func_configure()\n", unit->e1ku_name));

    e1000func_set_multi(unit);

    e1000func_configure_tx(unit);
    e1000func_setup_rctl(unit);
    e1000func_configure_rx(unit);
    D(bug("[%s]: e1000func_configure: Tx/Rx Configured\n", unit->e1ku_name));
    /* call E1000_DESC_UNUSED which always leaves
     * at least 1 descriptor unused to make sure
     * next_to_use != next_to_clean */
    for (i = 0; i < unit->e1ku_rxRing_QueueSize; i++) {
        D(bug("[%s]: e1000func_configure: Allocating Rx Buffers for queue %d\n", unit->e1ku_name, i));
        struct e1000_rx_ring *ring = &unit->e1ku_rxRing[i];
        e1000func_alloc_rx_buffers(unit, ring,
                                   E1000_DESC_UNUSED(ring));
    }
    D(bug("[%s]: e1000func_configure: Finished\n", unit->e1ku_name));
}
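
/*
 * E1000_DESC_UNUSED deliberately reports one descriptor less than is really
 * free: if every descriptor were handed to the hardware, next_to_use would
 * wrap onto next_to_clean and a completely full ring would become
 * indistinguishable from a completely empty one.
 */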
BOOL e1000func_clean_tx_irq(struct net_device *unit,
                            struct e1000_tx_ring *tx_ring)
{
    struct e1000_tx_desc *tx_desc, *eop_desc;
    struct e1000_buffer *buffer_info;
    unsigned int i, eop;
    BOOL cleaned = FALSE;
    BOOL retval = FALSE;
    unsigned int total_tx_bytes = 0, total_tx_packets = 0;

    D(bug("[%s]: e1000func_clean_tx_irq()\n", unit->e1ku_name));

    i = tx_ring->next_to_clean;
    eop = tx_ring->buffer_info[i].next_to_watch;
    eop_desc = E1000_TX_DESC(tx_ring, eop);

    D(bug("[%s]: e1000func_clean_tx_irq: starting at %d, eop=%d, desc @ %p\n", unit->e1ku_name, i, eop, eop_desc));

    while (eop_desc->upper.data & AROS_LONG2LE(E1000_TXD_STAT_DD)) {
        for (cleaned = FALSE; !cleaned; ) {
            D(bug("[%s]: e1000func_clean_tx_irq: cleaning Tx buffer %d\n", unit->e1ku_name, i));
            tx_desc = E1000_TX_DESC(tx_ring, i);
            buffer_info = &tx_ring->buffer_info[i];
            cleaned = (i == eop);

            if (cleaned) {
                retval = TRUE;
                // struct eth_frame *frame = buffer_info->buffer;
                total_tx_packets++;
                // total_tx_bytes += frame->len;
            }

            e1000func_unmap_and_free_tx_resource(unit, buffer_info);
            tx_desc->upper.data = 0;

            if (++i == tx_ring->count) i = 0;
        }

        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
    // if (cleaned && netif_carrier_ok(netdev) &&
    //     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) {
    /* Make sure that anybody stopping the queue after this
     * sees the new next_to_clean.
     */
    // if (netif_queue_stopped(netdev) &&
    //     !(test_bit(__E1000_DOWN, &adapter->state))) {
    //     netif_wake_queue(netdev);
    //     ++adapter->restart_queue;
    // }

    if (unit->detect_tx_hung) {
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i */
        unit->detect_tx_hung = FALSE;
        if (tx_ring->buffer_info[eop].dma && !(E1000_READ_REG((struct e1000_hw *)unit->e1ku_Private00, E1000_STATUS) & E1000_STATUS_TXOFF)) {
            /* detected Tx unit hang */
            D(bug("[%s]: e1000func_clean_tx_irq: Detected Tx Unit Hang -:\n", unit->e1ku_name));
            /* pointer difference already yields the ring index */
            D(bug("[%s]: e1000func_clean_tx_irq: Tx Queue <%lu>\n", unit->e1ku_name, (unsigned long)(tx_ring - unit->e1ku_txRing)));
            D(bug("[%s]: e1000func_clean_tx_irq: TDH <%x>\n", unit->e1ku_name, MMIO_R32(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdh)));
            D(bug("[%s]: e1000func_clean_tx_irq: TDT <%x>\n", unit->e1ku_name, MMIO_R32(((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + tx_ring->tdt)));
            D(bug("[%s]: e1000func_clean_tx_irq: next_to_use <%x>\n", unit->e1ku_name, tx_ring->next_to_use));
            D(bug("[%s]: e1000func_clean_tx_irq: next_to_clean <%x>\n", unit->e1ku_name, tx_ring->next_to_clean));
            D(bug("[%s]: e1000func_clean_tx_irq: buffer_info[next_to_clean]\n", unit->e1ku_name));
            D(bug("[%s]: e1000func_clean_tx_irq: next_to_watch <%x>\n", unit->e1ku_name, eop));
            D(bug("[%s]: e1000func_clean_tx_irq: next_to_watch.status <%x>\n", unit->e1ku_name, eop_desc->upper.fields.status));
            // netif_stop_queue(netdev);
        }
    }

    unit->e1ku_stats.PacketsSent += total_tx_packets;
    // adapter->total_tx_bytes += total_tx_bytes;
    // adapter->total_tx_packets += total_tx_packets;

    return retval;
}
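
/*
 * Completion model: each transmitted frame records the index of its final
 * descriptor in buffer_info[].next_to_watch; once the hardware sets that
 * descriptor's DD bit, every descriptor up to and including it can be
 * reclaimed in one inner sweep of the loop above.
 */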
BOOL e1000func_clean_rx_irq(struct net_device *unit,
                            struct e1000_rx_ring *rx_ring)
{
    struct e1000_rx_desc *rx_desc, *next_rxd;
    struct e1000_rx_buffer *buffer_info, *next_buffer;
    struct Opener *opener, *opener_tail;
    struct IOSana2Req *request, *request_tail;
    struct eth_frame *frame;
    ULONG length;
    UBYTE status;
    unsigned int i, total_rx_bytes = 0, total_rx_packets = 0;
    int cleaned_count = 0;
    int j;
    BOOL accepted, is_orphan, cleaned = FALSE;

    i = rx_ring->next_to_clean;
    rx_desc = E1000_RX_DESC(rx_ring, i);
    buffer_info = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];

    D(bug("[%s]: e1000func_clean_rx_irq: Starting at %d, Rx Desc @ %p, Buffer Info @ %p\n", unit->e1ku_name, i, rx_desc, buffer_info));

    while (rx_desc->status & E1000_RXD_STAT_DD) {

        status = rx_desc->status;
        length = AROS_LE2WORD(rx_desc->length);

        if (++i == rx_ring->count) i = 0;
        next_rxd = E1000_RX_DESC(rx_ring, i);

        next_buffer = (struct e1000_rx_buffer *)&rx_ring->buffer_info[i];

        cleaned = TRUE;
        cleaned_count++;

        /* !EOP means multiple descriptors were used to store a single
         * packet, also make sure the frame isn't just CRC only */
        if (!(status & E1000_RXD_STAT_EOP) || (length <= ETH_CRCSIZE)) {
            /* All receives must fit into a single buffer */
            D(bug("[%s]: e1000func_clean_rx_irq: Receive packet consumed multiple buffers\n", unit->e1ku_name));
            goto next_desc;
        }

        frame = (struct eth_frame *)rx_desc->buffer_addr;

        if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK){
            UBYTE last_byte = *(frame->eth_packet_data + length - 1);
            if (TBI_ACCEPT((struct e1000_hw *)unit->e1ku_Private00, status,
                           rx_desc->errors, length, last_byte,
                           unit->e1ku_frame_min,
                           unit->e1ku_frame_max)) {

                e1000_tbi_adjust_stats_82543((struct e1000_hw *)unit->e1ku_Private00,
                                             unit->e1ku_hw_stats,
                                             length, frame->eth_packet_data,
                                             unit->e1ku_frame_max);
                length--;
            } else {
                goto next_desc;
            }
        }

        /* got a valid packet - forward it to the network core */
        total_rx_packets++;

        /* adjust length to remove Ethernet CRC, this must be
         * done after the TBI_ACCEPT workaround above */
        length -= ETH_CRCSIZE;

        /* probably a little skewed due to removing CRC */
        total_rx_bytes += length;

        /* Receive Checksum Offload */
        // e1000func_rx_checksum(unit,
        //                       (ULONG)(status) |
        //                       ((ULONG)(rx_desc->errors) << 24),
        //                       AROS_LE2WORD(rx_desc->csum), skb);
        frame->eth_packet_crc[0] = (AROS_LE2WORD(rx_desc->csum) & 0xff000000) >> 24;
        frame->eth_packet_crc[1] = (AROS_LE2WORD(rx_desc->csum) & 0xff0000) >> 16;
        frame->eth_packet_crc[2] = (AROS_LE2WORD(rx_desc->csum) & 0xff00) >> 8;
        frame->eth_packet_crc[3] = AROS_LE2WORD(rx_desc->csum) & 0xff;

        /* Dump contents of frame if DEBUG enabled */
        {
            D(bug("[%s]: Rx Buffer %d Packet Dump -:", unit->e1ku_name, i));
            for (j=0; j<64; j++) {
                if ((j % 16) == 0)
                    D(bug("\n[%s]: %03x:", unit->e1ku_name, j));
                D(bug(" %02x", ((unsigned char*)frame)[j]));
            }
            D(bug("\n"));
        }

        is_orphan = TRUE;

        /* Check for address validity */
        if(AddressFilter(LIBBASE, unit, frame->eth_packet_dest))
        {
            /* Packet is addressed to this driver */
            D(bug("[%s]: e1000func_clean_rx_irq: Packet IP accepted with type = %d, checksum = %x\n", unit->e1ku_name, AROS_BE2WORD(frame->eth_packet_type), AROS_LE2WORD(rx_desc->csum)));

            opener = (APTR)unit->e1ku_Openers.mlh_Head;
            opener_tail = (APTR)&unit->e1ku_Openers.mlh_Tail;

            /* Offer packet to every opener */
            while(opener != opener_tail)
            {
                request = (APTR)opener->read_port.mp_MsgList.lh_Head;
                request_tail = (APTR)&opener->read_port.mp_MsgList.lh_Tail;
                accepted = FALSE;

                /* Offer packet to each request until it's accepted */
                while((request != request_tail) && !accepted)
                {
                    if (request->ios2_PacketType == AROS_BE2WORD(frame->eth_packet_type))
                    {
                        D(bug("[%s]: e1000func_clean_rx_irq: copy packet for opener ..\n", unit->e1ku_name));
                        CopyPacket(LIBBASE, unit, request, length, AROS_BE2WORD(frame->eth_packet_type), frame);
                        accepted = TRUE;
                    }
                    request =
                        (struct IOSana2Req *)request->ios2_Req.io_Message.mn_Node.ln_Succ;
                }

                if (accepted)
                    is_orphan = FALSE;

                opener = (APTR)opener->node.mln_Succ;
            }

            /* If packet was unwanted, give it to S2_READORPHAN request */
            if (is_orphan)
            {
                unit->e1ku_stats.UnknownTypesReceived++;

                if(!IsMsgPortEmpty(unit->e1ku_request_ports[ADOPT_QUEUE]))
                {
                    CopyPacket(LIBBASE, unit,
                        (APTR)unit->e1ku_request_ports[ADOPT_QUEUE]->
                        mp_MsgList.lh_Head, length, AROS_BE2WORD(frame->eth_packet_type), frame);
                    D(bug("[%s]: e1000func_clean_rx_irq: packet copied to orphan queue\n", unit->e1ku_name));
                }
            }
        }

next_desc:
        rx_desc->status = 0;

        /* use prefetched values */
        rx_desc = next_rxd;
        buffer_info = next_buffer;
    }
    rx_ring->next_to_clean = i;

    D(bug("[%s]: e1000func_clean_rx_irq: Next to clean = %d\n", unit->e1ku_name, rx_ring->next_to_clean));

    // if ((cleaned_count = E1000_DESC_UNUSED(rx_ring)))
    //     writel(i, ((struct e1000_hw *)unit->e1ku_Private00)->hw_addr + rx_ring->rdt);

    unit->e1ku_stats.PacketsReceived += total_rx_packets;
    //adapter->total_rx_packets += total_rx_packets;
    //adapter->total_rx_bytes += total_rx_bytes;
    D(bug("[%s]: e1000func_clean_rx_irq: Received %d packets (%d bytes)\n", unit->e1ku_name, total_rx_packets, total_rx_bytes));

    return cleaned;
}
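
/*
 * Clearing rx_desc->status in the loop above stops a stale DD bit from
 * being processed twice; the descriptor itself is only handed back to the
 * hardware when e1000func_alloc_rx_buffers() advances RDT past it again.
 */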
/** OS SUPPORT CALLS FOR INTEL CODE **/
void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
    D(bug("[%s]: e1000_pci_clear_mwi()\n", ((struct e1000Unit *)hw->back)->e1ku_name));
#warning "TODO: How to CLEAR Memory Write Invalidate on AROS!"
}
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
    D(bug("[%s]: e1000_pci_set_mwi()\n", ((struct e1000Unit *)hw->back)->e1ku_name));
#warning "TODO: How to SET Memory Write Invalidate on AROS!"
}
LONG e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, ULONG size)
{
    D(bug("[%s]: e1000_alloc_zeroed_dev_spec_struct()\n", ((struct e1000Unit *)hw->back)->e1ku_name));

    if ((hw->dev_spec = AllocMem(size, MEMF_PUBLIC | MEMF_CLEAR)) == NULL)
        return -E1000_ERR_CONFIG;

    hw->dev_spec_size = size; /* record the size so e1000_free_dev_spec_struct() releases the full block */

    return E1000_SUCCESS;
}
LONG e1000_read_pcie_cap_reg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    D(bug("[%s]: e1000_read_pcie_cap_reg()\n", ((struct e1000Unit *)hw->back)->e1ku_name));
#warning "TODO: How to READ PCI-Express Cap Register on AROS!"
    return 0;
}
void e1000_free_dev_spec_struct(struct e1000_hw *hw)
{
    D(bug("[%s]: e1000_free_dev_spec_struct()\n", ((struct e1000Unit *)hw->back)->e1ku_name));

    if (hw->dev_spec != NULL)
        FreeMem(hw->dev_spec, hw->dev_spec_size);
}
void e1000_read_pci_cfg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_ReadConfigWord pcireadmsg;
    D(bug("[%s]: e1000_read_pci_cfg()\n", ((struct e1000Unit *)hw->back)->e1ku_name));
    pcireadmsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_ReadConfigWord);
    pcireadmsg.reg = reg;
    *value = (UWORD)OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, &pcireadmsg);
    D(bug("[%s]: e1000_read_pci_cfg: returning %x\n", ((struct e1000Unit *)hw->back)->e1ku_name, *value));
}
void e1000_write_pci_cfg(struct e1000_hw *hw, ULONG reg, UWORD *value)
{
    struct pHidd_PCIDevice_WriteConfigWord pciwritemsg;
    D(bug("[%s]: e1000_write_pci_cfg(%d, %x)\n", ((struct e1000Unit *)hw->back)->e1ku_name, reg, *value));
    pciwritemsg.mID = OOP_GetMethodID(IID_Hidd_PCIDevice, moHidd_PCIDevice_WriteConfigWord);
    pciwritemsg.reg = reg;
    pciwritemsg.val = *value;
    OOP_DoMethod(((struct e1000Unit *)hw->back)->e1ku_PCIDevice, &pciwritemsg);
}
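
/*
 * Usage sketch (illustrative): reading the PCI command word (config offset
 * 0x04) through the helper above, e.g. to test for Memory Write Invalidate
 * as e1000func_leave_82542_rst() does via bus.pci_cmd_word:
 *
 *     UWORD cmd;
 *     e1000_read_pci_cfg(hw, 0x04, &cmd);
 *     if (cmd & CMD_MEM_WRT_INVALIDATE)
 *         ...
 */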