drivers/net/sfc/falcon.c
1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <linux/seq_file.h>
16 #include <linux/i2c.h>
17 #include <linux/mii.h>
18 #include <linux/slab.h>
19 #include "net_driver.h"
20 #include "bitfield.h"
21 #include "efx.h"
22 #include "mac.h"
23 #include "spi.h"
24 #include "nic.h"
25 #include "regs.h"
26 #include "io.h"
27 #include "mdio_10g.h"
28 #include "phy.h"
29 #include "workarounds.h"
31 /* Hardware control for SFC4000 (aka Falcon). */
33 static const unsigned int
34 /* "Large" EEPROM device: Atmel AT25640 or similar
35 * 8 KB, 16-bit address, 32 B write block */
36 large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
37 | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
38 | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
39 /* Default flash device: Atmel AT25F1024
40 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
41 default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
42 | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
43 | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
44 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
45 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
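/* Decoded for reference (added note, derived from falcon_spi_device_init()
 * below, which takes 1 << field for the size/erase/block fields):
 * large_eeprom_type  -> size 1 << 13 = 8 KB, 2 address bytes,
 *                       write block 1 << 5 = 32 B
 * default_flash_type -> size 1 << 17 = 128 KB, 3 address bytes,
 *                       erase opcode 0x52, erase block 1 << 15 = 32 KB,
 *                       write block 1 << 8 = 256 B */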
47 /**************************************************************************
49 * I2C bus - this is a bit-bashing interface using GPIO pins
50 * Note that it uses the output enables to tristate the outputs
51 * SDA is the data pin and SCL is the clock
53 **************************************************************************
55 static void falcon_setsda(void *data, int state)
57 struct efx_nic *efx = (struct efx_nic *)data;
58 efx_oword_t reg;
60 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
61 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
62 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
65 static void falcon_setscl(void *data, int state)
67 struct efx_nic *efx = (struct efx_nic *)data;
68 efx_oword_t reg;
70 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
71 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
72 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
75 static int falcon_getsda(void *data)
77 struct efx_nic *efx = (struct efx_nic *)data;
78 efx_oword_t reg;
80 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
81 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
84 static int falcon_getscl(void *data)
86 struct efx_nic *efx = (struct efx_nic *)data;
87 efx_oword_t reg;
89 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
90 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
93 static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
94 .setsda = falcon_setsda,
95 .setscl = falcon_setscl,
96 .getsda = falcon_getsda,
97 .getscl = falcon_getscl,
98 .udelay = 5,
99 /* Wait up to 50 ms for slave to let us pull SCL high */
100 .timeout = DIV_ROUND_UP(HZ, 20),
103 static void falcon_push_irq_moderation(struct efx_channel *channel)
105 efx_dword_t timer_cmd;
106 struct efx_nic *efx = channel->efx;
108 /* Set timer register */
109 if (channel->irq_moderation) {
110 EFX_POPULATE_DWORD_2(timer_cmd,
111 FRF_AB_TC_TIMER_MODE,
112 FFE_BB_TIMER_MODE_INT_HLDOFF,
113 FRF_AB_TC_TIMER_VAL,
114 channel->irq_moderation - 1);
115 } else {
116 EFX_POPULATE_DWORD_2(timer_cmd,
117 FRF_AB_TC_TIMER_MODE,
118 FFE_BB_TIMER_MODE_DIS,
119 FRF_AB_TC_TIMER_VAL, 0);
121 BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
122 efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
123 channel->channel);
126 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
128 static void falcon_prepare_flush(struct efx_nic *efx)
130 falcon_deconfigure_mac_wrapper(efx);
132 /* Wait for the TX and RX FIFOs to get to the next packet boundary
133 * (~1ms without back-pressure), then to drain the remainder of the
134 * FIFOs at data path speeds (negligible), with a healthy margin. */
135 msleep(10);
138 /* Acknowledge a legacy interrupt from Falcon
140 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
142 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
143 * BIU. Interrupt acknowledge is read sensitive so must write instead
144 * (then read to ensure the BIU collector is flushed)
146 * NB most hardware supports MSI interrupts
148 inline void falcon_irq_ack_a1(struct efx_nic *efx)
150 efx_dword_t reg;
152 EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
153 efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
154 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
158 irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
160 struct efx_nic *efx = dev_id;
161 efx_oword_t *int_ker = efx->irq_status.addr;
162 struct efx_channel *channel;
163 int syserr;
164 int queues;
166 /* Check to see if this is our interrupt. If it isn't, we
167 * exit without having touched the hardware.
169 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
170 EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
171 raw_smp_processor_id());
172 return IRQ_NONE;
174 efx->last_irq_cpu = raw_smp_processor_id();
175 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
176 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
178 /* Check to see if we have a serious error condition */
179 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
180 if (unlikely(syserr))
181 return efx_nic_fatal_interrupt(efx);
183 /* Determine interrupting queues, clear interrupt status
184 * register and acknowledge the device interrupt.
186 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
187 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
188 EFX_ZERO_OWORD(*int_ker);
189 wmb(); /* Ensure the vector is cleared before interrupt ack */
190 falcon_irq_ack_a1(efx);
192 /* Schedule processing of any interrupting queues */
193 channel = &efx->channel[0];
194 while (queues) {
195 if (queues & 0x01)
196 efx_schedule_channel(channel);
197 channel++;
198 queues >>= 1;
201 return IRQ_HANDLED;
203 /**************************************************************************
205 * EEPROM/flash
207 **************************************************************************
210 #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
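/* Added note: efx_oword_t is a 128-bit value, so each SPI host command moves
 * at most 16 bytes. falcon_spi_read()/falcon_spi_write() below therefore
 * split larger transfers into chunks of this size, each staged through the
 * FR_AB_EE_SPI_HDATA register in falcon_spi_cmd(). */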
212 static int falcon_spi_poll(struct efx_nic *efx)
214 efx_oword_t reg;
215 efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
216 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
219 /* Wait for SPI command completion */
220 static int falcon_spi_wait(struct efx_nic *efx)
222 /* Most commands will finish quickly, so we start polling at
223 * very short intervals. Sometimes the command may have to
224 * wait for VPD or expansion ROM access outside of our
225 * control, so we allow up to 100 ms. */
226 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
227 int i;
229 for (i = 0; i < 10; i++) {
230 if (!falcon_spi_poll(efx))
231 return 0;
232 udelay(10);
235 for (;;) {
236 if (!falcon_spi_poll(efx))
237 return 0;
238 if (time_after_eq(jiffies, timeout)) {
239 EFX_ERR(efx, "timed out waiting for SPI\n");
240 return -ETIMEDOUT;
242 schedule_timeout_uninterruptible(1);
246 int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
247 unsigned int command, int address,
248 const void *in, void *out, size_t len)
250 bool addressed = (address >= 0);
251 bool reading = (out != NULL);
252 efx_oword_t reg;
253 int rc;
255 /* Input validation */
256 if (len > FALCON_SPI_MAX_LEN)
257 return -EINVAL;
258 BUG_ON(!mutex_is_locked(&efx->spi_lock));
260 /* Check that previous command is not still running */
261 rc = falcon_spi_poll(efx);
262 if (rc)
263 return rc;
265 /* Program address register, if we have an address */
266 if (addressed) {
267 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
268 efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
271 /* Program data register, if we have data */
272 if (in != NULL) {
273 memcpy(&reg, in, len);
274 efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
277 /* Issue read/write command */
278 EFX_POPULATE_OWORD_7(reg,
279 FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
280 FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
281 FRF_AB_EE_SPI_HCMD_DABCNT, len,
282 FRF_AB_EE_SPI_HCMD_READ, reading,
283 FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
284 FRF_AB_EE_SPI_HCMD_ADBCNT,
285 (addressed ? spi->addr_len : 0),
286 FRF_AB_EE_SPI_HCMD_ENC, command);
287 efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
289 /* Wait for read/write to complete */
290 rc = falcon_spi_wait(efx);
291 if (rc)
292 return rc;
294 /* Read data */
295 if (out != NULL) {
296 efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
297 memcpy(out, &reg, len);
300 return 0;
303 static size_t
304 falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
306 return min(FALCON_SPI_MAX_LEN,
307 (spi->block_size - (start & (spi->block_size - 1))));
310 static inline u8
311 efx_spi_munge_command(const struct efx_spi_device *spi,
312 const u8 command, const unsigned int address)
314 return command | (((address >> 8) & spi->munge_address) << 3);
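/* Illustrative example (not in the original source): munge_address is only
 * set for 512-byte, single-address-byte EEPROMs (see falcon_spi_device_init),
 * which carry address bit 8 in bit 3 of the opcode. Assuming the standard
 * READ opcode 0x03 for SPI_READ, reading address 0x1a0 gives
 * 0x03 | (((0x1a0 >> 8) & 1) << 3) = 0x0b. */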
317 /* Wait up to 10 ms for buffered write completion */
319 int falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
321 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
322 u8 status;
323 int rc;
325 for (;;) {
326 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
327 &status, sizeof(status));
328 if (rc)
329 return rc;
330 if (!(status & SPI_STATUS_NRDY))
331 return 0;
332 if (time_after_eq(jiffies, timeout)) {
333 EFX_ERR(efx, "SPI write timeout on device %d"
334 " last status=0x%02x\n",
335 spi->device_id, status);
336 return -ETIMEDOUT;
338 schedule_timeout_uninterruptible(1);
342 int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
343 loff_t start, size_t len, size_t *retlen, u8 *buffer)
345 size_t block_len, pos = 0;
346 unsigned int command;
347 int rc = 0;
349 while (pos < len) {
350 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
352 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
353 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
354 buffer + pos, block_len);
355 if (rc)
356 break;
357 pos += block_len;
359 /* Avoid locking up the system */
360 cond_resched();
361 if (signal_pending(current)) {
362 rc = -EINTR;
363 break;
367 if (retlen)
368 *retlen = pos;
369 return rc;
373 int falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
374 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
376 u8 verify_buffer[FALCON_SPI_MAX_LEN];
377 size_t block_len, pos = 0;
378 unsigned int command;
379 int rc = 0;
381 while (pos < len) {
382 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
383 if (rc)
384 break;
386 block_len = min(len - pos,
387 falcon_spi_write_limit(spi, start + pos));
388 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
389 rc = falcon_spi_cmd(efx, spi, command, start + pos,
390 buffer + pos, NULL, block_len);
391 if (rc)
392 break;
394 rc = falcon_spi_wait_write(efx, spi);
395 if (rc)
396 break;
398 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
399 rc = falcon_spi_cmd(efx, spi, command, start + pos,
400 NULL, verify_buffer, block_len);
401 if (memcmp(verify_buffer, buffer + pos, block_len)) {
402 rc = -EIO;
403 break;
406 pos += block_len;
408 /* Avoid locking up the system */
409 cond_resched();
410 if (signal_pending(current)) {
411 rc = -EINTR;
412 break;
416 if (retlen)
417 *retlen = pos;
418 return rc;
421 /**************************************************************************
423 * MAC wrapper
425 **************************************************************************
428 static void falcon_push_multicast_hash(struct efx_nic *efx)
430 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
432 WARN_ON(!mutex_is_locked(&efx->mac_lock));
434 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
435 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
438 static void falcon_reset_macs(struct efx_nic *efx)
440 struct falcon_nic_data *nic_data = efx->nic_data;
441 efx_oword_t reg, mac_ctrl;
442 int count;
444 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
445 /* It's not safe to use GLB_CTL_REG to reset the
446 * macs, so instead use the internal MAC resets
448 if (!EFX_IS10G(efx)) {
449 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
450 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
451 udelay(1000);
453 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
454 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
455 udelay(1000);
456 return;
457 } else {
458 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
459 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
461 for (count = 0; count < 10000; count++) {
462 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
463 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0)
465 return;
466 udelay(10);
469 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
473 /* Mac stats will fail whilst the TX fifo is draining */
474 WARN_ON(nic_data->stats_disable_count == 0);
476 efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
477 EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
478 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
480 efx_reado(efx, &reg, FR_AB_GLB_CTL);
481 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
482 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
483 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
484 efx_writeo(efx, &reg, FR_AB_GLB_CTL);
486 count = 0;
487 while (1) {
488 efx_reado(efx, &reg, FR_AB_GLB_CTL);
489 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
490 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
491 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
492 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
493 count);
494 break;
496 if (count > 20) {
497 EFX_ERR(efx, "MAC reset failed\n");
498 break;
500 count++;
501 udelay(10);
504 /* Ensure the correct MAC is selected before statistics
505 * are re-enabled by the caller */
506 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
509 void falcon_drain_tx_fifo(struct efx_nic *efx)
511 efx_oword_t reg;
513 if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
514 (efx->loopback_mode != LOOPBACK_NONE))
515 return;
517 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
518 /* There is no point in draining more than once */
519 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
520 return;
522 falcon_reset_macs(efx);
525 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
527 efx_oword_t reg;
529 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
530 return;
532 /* Isolate the MAC -> RX */
533 efx_reado(efx, &reg, FR_AZ_RX_CFG);
534 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
535 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
537 /* Isolate TX -> MAC */
538 falcon_drain_tx_fifo(efx);
541 void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
543 struct efx_link_state *link_state = &efx->link_state;
544 efx_oword_t reg;
545 int link_speed;
547 switch (link_state->speed) {
548 case 10000: link_speed = 3; break;
549 case 1000: link_speed = 2; break;
550 case 100: link_speed = 1; break;
551 default: link_speed = 0; break;
553 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
554 * as advertised. Disable to ensure packets are not
555 * indefinitely held and TX queue can be flushed at any point
556 * while the link is down. */
557 EFX_POPULATE_OWORD_5(reg,
558 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
559 FRF_AB_MAC_BCAD_ACPT, 1,
560 FRF_AB_MAC_UC_PROM, efx->promiscuous,
561 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
562 FRF_AB_MAC_SPEED, link_speed);
563 /* On B0, MAC backpressure can be disabled and packets get
564 * discarded. */
565 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
566 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
567 !link_state->up);
570 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
572 /* Restore the multicast hash registers. */
573 falcon_push_multicast_hash(efx);
575 efx_reado(efx, &reg, FR_AZ_RX_CFG);
576 /* Enable XOFF signal from RX FIFO (we enabled it during NIC
577 * initialisation but it may read back as 0) */
578 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
579 /* Unisolate the MAC -> RX */
580 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
581 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
582 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
585 static void falcon_stats_request(struct efx_nic *efx)
587 struct falcon_nic_data *nic_data = efx->nic_data;
588 efx_oword_t reg;
590 WARN_ON(nic_data->stats_pending);
591 WARN_ON(nic_data->stats_disable_count);
593 if (nic_data->stats_dma_done == NULL)
594 return; /* no mac selected */
596 *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
597 nic_data->stats_pending = true;
598 wmb(); /* ensure done flag is clear */
600 /* Initiate DMA transfer of stats */
601 EFX_POPULATE_OWORD_2(reg,
602 FRF_AB_MAC_STAT_DMA_CMD, 1,
603 FRF_AB_MAC_STAT_DMA_ADR,
604 efx->stats_buffer.dma_addr);
605 efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
607 mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
610 static void falcon_stats_complete(struct efx_nic *efx)
612 struct falcon_nic_data *nic_data = efx->nic_data;
614 if (!nic_data->stats_pending)
615 return;
617 nic_data->stats_pending = 0;
618 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
619 rmb(); /* read the done flag before the stats */
620 efx->mac_op->update_stats(efx);
621 } else {
622 EFX_ERR(efx, "timed out waiting for statistics\n");
626 static void falcon_stats_timer_func(unsigned long context)
628 struct efx_nic *efx = (struct efx_nic *)context;
629 struct falcon_nic_data *nic_data = efx->nic_data;
631 spin_lock(&efx->stats_lock);
633 falcon_stats_complete(efx);
634 if (nic_data->stats_disable_count == 0)
635 falcon_stats_request(efx);
637 spin_unlock(&efx->stats_lock);
640 static void falcon_switch_mac(struct efx_nic *efx);
642 static bool falcon_loopback_link_poll(struct efx_nic *efx)
644 struct efx_link_state old_state = efx->link_state;
646 WARN_ON(!mutex_is_locked(&efx->mac_lock));
647 WARN_ON(!LOOPBACK_INTERNAL(efx));
649 efx->link_state.fd = true;
650 efx->link_state.fc = efx->wanted_fc;
651 efx->link_state.up = true;
653 if (efx->loopback_mode == LOOPBACK_GMAC)
654 efx->link_state.speed = 1000;
655 else
656 efx->link_state.speed = 10000;
658 return !efx_link_state_equal(&efx->link_state, &old_state);
661 static int falcon_reconfigure_port(struct efx_nic *efx)
663 int rc;
665 WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
667 /* Poll the PHY link state *before* reconfiguring it. This means we
668 * will pick up the correct speed (in loopback) to select the correct
669 * MAC.
671 if (LOOPBACK_INTERNAL(efx))
672 falcon_loopback_link_poll(efx);
673 else
674 efx->phy_op->poll(efx);
676 falcon_stop_nic_stats(efx);
677 falcon_deconfigure_mac_wrapper(efx);
679 falcon_switch_mac(efx);
681 efx->phy_op->reconfigure(efx);
682 rc = efx->mac_op->reconfigure(efx);
683 BUG_ON(rc);
685 falcon_start_nic_stats(efx);
687 /* Synchronise efx->link_state with the kernel */
688 efx_link_status_changed(efx);
690 return 0;
693 /**************************************************************************
695 * PHY access via GMII
697 **************************************************************************
700 /* Wait for GMII access to complete */
701 static int falcon_gmii_wait(struct efx_nic *efx)
703 efx_oword_t md_stat;
704 int count;
706 /* wait up to 50 ms - taken max from datasheet */
707 for (count = 0; count < 5000; count++) {
708 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
709 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
710 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
711 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
712 EFX_ERR(efx, "error from GMII access "
713 EFX_OWORD_FMT"\n",
714 EFX_OWORD_VAL(md_stat));
715 return -EIO;
717 return 0;
719 udelay(10);
721 EFX_ERR(efx, "timed out waiting for GMII\n");
722 return -ETIMEDOUT;
725 /* Write an MDIO register of a PHY connected to Falcon. */
726 static int falcon_mdio_write(struct net_device *net_dev,
727 int prtad, int devad, u16 addr, u16 value)
729 struct efx_nic *efx = netdev_priv(net_dev);
730 efx_oword_t reg;
731 int rc;
733 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
734 prtad, devad, addr, value);
736 mutex_lock(&efx->mdio_lock);
738 /* Check MDIO not currently being accessed */
739 rc = falcon_gmii_wait(efx);
740 if (rc)
741 goto out;
743 /* Write the address/ID register */
744 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
745 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
747 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
748 FRF_AB_MD_DEV_ADR, devad);
749 efx_writeo(efx, &reg, FR_AB_MD_ID);
751 /* Write data */
752 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
753 efx_writeo(efx, &reg, FR_AB_MD_TXD);
755 EFX_POPULATE_OWORD_2(reg,
756 FRF_AB_MD_WRC, 1,
757 FRF_AB_MD_GC, 0);
758 efx_writeo(efx, &reg, FR_AB_MD_CS);
760 /* Wait for data to be written */
761 rc = falcon_gmii_wait(efx);
762 if (rc) {
763 /* Abort the write operation */
764 EFX_POPULATE_OWORD_2(reg,
765 FRF_AB_MD_WRC, 0,
766 FRF_AB_MD_GC, 1);
767 efx_writeo(efx, &reg, FR_AB_MD_CS);
768 udelay(10);
771 out:
772 mutex_unlock(&efx->mdio_lock);
773 return rc;
776 /* Read an MDIO register of a PHY connected to Falcon. */
777 static int falcon_mdio_read(struct net_device *net_dev,
778 int prtad, int devad, u16 addr)
780 struct efx_nic *efx = netdev_priv(net_dev);
781 efx_oword_t reg;
782 int rc;
784 mutex_lock(&efx->mdio_lock);
786 /* Check MDIO not currently being accessed */
787 rc = falcon_gmii_wait(efx);
788 if (rc)
789 goto out;
791 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
792 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
794 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
795 FRF_AB_MD_DEV_ADR, devad);
796 efx_writeo(efx, &reg, FR_AB_MD_ID);
798 /* Request data to be read */
799 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
800 efx_writeo(efx, &reg, FR_AB_MD_CS);
802 /* Wait for data to become available */
803 rc = falcon_gmii_wait(efx);
804 if (rc == 0) {
805 efx_reado(efx, &reg, FR_AB_MD_RXD);
806 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
807 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
808 prtad, devad, addr, rc);
809 } else {
810 /* Abort the read operation */
811 EFX_POPULATE_OWORD_2(reg,
812 FRF_AB_MD_RIC, 0,
813 FRF_AB_MD_GC, 1);
814 efx_writeo(efx, &reg, FR_AB_MD_CS);
816 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
817 prtad, devad, addr, rc);
820 out:
821 mutex_unlock(&efx->mdio_lock);
822 return rc;
825 static void falcon_clock_mac(struct efx_nic *efx)
827 unsigned strap_val;
828 efx_oword_t nic_stat;
830 /* Configure the NIC generated MAC clock correctly */
831 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
832 strap_val = EFX_IS10G(efx) ? 5 : 3;
833 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
834 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
835 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
836 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
837 } else {
838 /* Falcon A1 does not support 1G/10G speed switching
839 * and must not be used with a PHY that does. */
840 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
841 strap_val);
845 static void falcon_switch_mac(struct efx_nic *efx)
847 struct efx_mac_operations *old_mac_op = efx->mac_op;
848 struct falcon_nic_data *nic_data = efx->nic_data;
849 unsigned int stats_done_offset;
851 WARN_ON(!mutex_is_locked(&efx->mac_lock));
852 WARN_ON(nic_data->stats_disable_count == 0);
854 efx->mac_op = (EFX_IS10G(efx) ?
855 &falcon_xmac_operations : &falcon_gmac_operations);
857 if (EFX_IS10G(efx))
858 stats_done_offset = XgDmaDone_offset;
859 else
860 stats_done_offset = GDmaDone_offset;
861 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
863 if (old_mac_op == efx->mac_op)
864 return;
866 falcon_clock_mac(efx);
868 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
869 /* Not all macs support a mac-level link state */
870 efx->xmac_poll_required = false;
871 falcon_reset_macs(efx);
874 /* This call is responsible for hooking in the MAC and PHY operations */
875 static int falcon_probe_port(struct efx_nic *efx)
877 int rc;
879 switch (efx->phy_type) {
880 case PHY_TYPE_SFX7101:
881 efx->phy_op = &falcon_sfx7101_phy_ops;
882 break;
883 case PHY_TYPE_SFT9001A:
884 case PHY_TYPE_SFT9001B:
885 efx->phy_op = &falcon_sft9001_phy_ops;
886 break;
887 case PHY_TYPE_QT2022C2:
888 case PHY_TYPE_QT2025C:
889 efx->phy_op = &falcon_qt202x_phy_ops;
890 break;
891 default:
892 EFX_ERR(efx, "Unknown PHY type %d\n",
893 efx->phy_type);
894 return -ENODEV;
897 /* Fill out MDIO structure and loopback modes */
898 efx->mdio.mdio_read = falcon_mdio_read;
899 efx->mdio.mdio_write = falcon_mdio_write;
900 rc = efx->phy_op->probe(efx);
901 if (rc != 0)
902 return rc;
904 /* Initial assumption */
905 efx->link_state.speed = 10000;
906 efx->link_state.fd = true;
908 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
909 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
910 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
911 else
912 efx->wanted_fc = EFX_FC_RX;
913 if (efx->mdio.mmds & MDIO_DEVS_AN)
914 efx->wanted_fc |= EFX_FC_AUTO;
916 /* Allocate buffer for stats */
917 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
918 FALCON_MAC_STATS_SIZE);
919 if (rc)
920 return rc;
921 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
922 (u64)efx->stats_buffer.dma_addr,
923 efx->stats_buffer.addr,
924 (u64)virt_to_phys(efx->stats_buffer.addr));
926 return 0;
929 static void falcon_remove_port(struct efx_nic *efx)
931 efx->phy_op->remove(efx);
932 efx_nic_free_buffer(efx, &efx->stats_buffer);
935 /**************************************************************************
937 * Falcon test code
939 **************************************************************************/
941 static int
942 falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
944 struct falcon_nvconfig *nvconfig;
945 struct efx_spi_device *spi;
946 void *region;
947 int rc, magic_num, struct_ver;
948 __le16 *word, *limit;
949 u32 csum;
951 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
952 if (!spi)
953 return -EINVAL;
955 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
956 if (!region)
957 return -ENOMEM;
958 nvconfig = region + FALCON_NVCONFIG_OFFSET;
960 mutex_lock(&efx->spi_lock);
961 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
962 mutex_unlock(&efx->spi_lock);
963 if (rc) {
964 EFX_ERR(efx, "Failed to read %s\n",
965 efx->spi_flash ? "flash" : "EEPROM");
966 rc = -EIO;
967 goto out;
970 magic_num = le16_to_cpu(nvconfig->board_magic_num);
971 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
973 rc = -EINVAL;
974 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
975 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
976 goto out;
978 if (struct_ver < 2) {
979 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
980 goto out;
981 } else if (struct_ver < 4) {
982 word = &nvconfig->board_magic_num;
983 limit = (__le16 *) (nvconfig + 1);
984 } else {
985 word = region;
986 limit = region + FALCON_NVCONFIG_END;
988 for (csum = 0; word < limit; ++word)
989 csum += le16_to_cpu(*word);
991 if (~csum & 0xffff) {
992 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
993 goto out;
996 rc = 0;
997 if (nvconfig_out)
998 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
1000 out:
1001 kfree(region);
1002 return rc;
1005 static int falcon_test_nvram(struct efx_nic *efx)
1007 return falcon_read_nvram(efx, NULL);
1010 static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1011 { FR_AZ_ADR_REGION,
1012 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1013 { FR_AZ_RX_CFG,
1014 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
1015 { FR_AZ_TX_CFG,
1016 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
1017 { FR_AZ_TX_RESERVED,
1018 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
1019 { FR_AB_MAC_CTRL,
1020 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
1021 { FR_AZ_SRM_TX_DC_CFG,
1022 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
1023 { FR_AZ_RX_DC_CFG,
1024 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
1025 { FR_AZ_RX_DC_PF_WM,
1026 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
1027 { FR_BZ_DP_CTRL,
1028 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
1029 { FR_AB_GM_CFG2,
1030 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
1031 { FR_AB_GMF_CFG0,
1032 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
1033 { FR_AB_XM_GLB_CFG,
1034 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
1035 { FR_AB_XM_TX_CFG,
1036 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
1037 { FR_AB_XM_RX_CFG,
1038 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
1039 { FR_AB_XM_RX_PARAM,
1040 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
1041 { FR_AB_XM_FC,
1042 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
1043 { FR_AB_XM_ADR_LO,
1044 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
1045 { FR_AB_XX_SD_CTL,
1046 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
1049 static int falcon_b0_test_registers(struct efx_nic *efx)
1051 return efx_nic_test_registers(efx, falcon_b0_register_tests,
1052 ARRAY_SIZE(falcon_b0_register_tests));
1055 /**************************************************************************
1057 * Device reset
1059 **************************************************************************
1062 /* Resets NIC to known state. This routine must be called in process
1063 * context and is allowed to sleep. */
1064 static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1066 struct falcon_nic_data *nic_data = efx->nic_data;
1067 efx_oword_t glb_ctl_reg_ker;
1068 int rc;
1070 EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
1072 /* Initiate device reset */
1073 if (method == RESET_TYPE_WORLD) {
1074 rc = pci_save_state(efx->pci_dev);
1075 if (rc) {
1076 EFX_ERR(efx, "failed to backup PCI state of primary "
1077 "function prior to hardware reset\n");
1078 goto fail1;
1080 if (efx_nic_is_dual_func(efx)) {
1081 rc = pci_save_state(nic_data->pci_dev2);
1082 if (rc) {
1083 EFX_ERR(efx, "failed to backup PCI state of "
1084 "secondary function prior to "
1085 "hardware reset\n");
1086 goto fail2;
1090 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
1091 FRF_AB_EXT_PHY_RST_DUR,
1092 FFE_AB_EXT_PHY_RST_DUR_10240US,
1093 FRF_AB_SWRST, 1);
1094 } else {
1095 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
1096 /* exclude PHY from "invisible" reset */
1097 FRF_AB_EXT_PHY_RST_CTL,
1098 method == RESET_TYPE_INVISIBLE,
1099 /* exclude EEPROM/flash and PCIe */
1100 FRF_AB_PCIE_CORE_RST_CTL, 1,
1101 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
1102 FRF_AB_PCIE_SD_RST_CTL, 1,
1103 FRF_AB_EE_RST_CTL, 1,
1104 FRF_AB_EXT_PHY_RST_DUR,
1105 FFE_AB_EXT_PHY_RST_DUR_10240US,
1106 FRF_AB_SWRST, 1);
1108 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1110 EFX_LOG(efx, "waiting for hardware reset\n");
1111 schedule_timeout_uninterruptible(HZ / 20);
1113 /* Restore PCI configuration if needed */
1114 if (method == RESET_TYPE_WORLD) {
1115 if (efx_nic_is_dual_func(efx)) {
1116 rc = pci_restore_state(nic_data->pci_dev2);
1117 if (rc) {
1118 EFX_ERR(efx, "failed to restore PCI config for "
1119 "the secondary function\n");
1120 goto fail3;
1123 rc = pci_restore_state(efx->pci_dev);
1124 if (rc) {
1125 EFX_ERR(efx, "failed to restore PCI config for the "
1126 "primary function\n");
1127 goto fail4;
1129 EFX_LOG(efx, "successfully restored PCI config\n");
1132 /* Assert that reset complete */
1133 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1134 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
1135 rc = -ETIMEDOUT;
1136 EFX_ERR(efx, "timed out waiting for hardware reset\n");
1137 goto fail5;
1139 EFX_LOG(efx, "hardware reset complete\n");
1141 return 0;
1143 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1144 fail2:
1145 fail3:
1146 pci_restore_state(efx->pci_dev);
1147 fail1:
1148 fail4:
1149 fail5:
1150 return rc;
1153 static void falcon_monitor(struct efx_nic *efx)
1155 bool link_changed;
1156 int rc;
1158 BUG_ON(!mutex_is_locked(&efx->mac_lock));
1160 rc = falcon_board(efx)->type->monitor(efx);
1161 if (rc) {
1162 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
1163 (rc == -ERANGE) ? "reported fault" : "failed");
1164 efx->phy_mode |= PHY_MODE_LOW_POWER;
1165 rc = __efx_reconfigure_port(efx);
1166 WARN_ON(rc);
1169 if (LOOPBACK_INTERNAL(efx))
1170 link_changed = falcon_loopback_link_poll(efx);
1171 else
1172 link_changed = efx->phy_op->poll(efx);
1174 if (link_changed) {
1175 falcon_stop_nic_stats(efx);
1176 falcon_deconfigure_mac_wrapper(efx);
1178 falcon_switch_mac(efx);
1179 rc = efx->mac_op->reconfigure(efx);
1180 BUG_ON(rc);
1182 falcon_start_nic_stats(efx);
1184 efx_link_status_changed(efx);
1187 if (EFX_IS10G(efx))
1188 falcon_poll_xmac(efx);
1191 /* Zeroes out the SRAM contents. This routine must be called in
1192 * process context and is allowed to sleep.
1194 static int falcon_reset_sram(struct efx_nic *efx)
1196 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
1197 int count;
1199 /* Set the SRAM wake/sleep GPIO appropriately. */
1200 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1201 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
1202 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
1203 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1205 /* Initiate SRAM reset */
1206 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
1207 FRF_AZ_SRM_INIT_EN, 1,
1208 FRF_AZ_SRM_NB_SZ, 0);
1209 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1211 /* Wait for SRAM reset to complete */
1212 count = 0;
1213 do {
1214 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
1216 /* SRAM reset is slow; expect around 16ms */
1217 schedule_timeout_uninterruptible(HZ / 50);
1219 /* Check for reset complete */
1220 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1221 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
1222 EFX_LOG(efx, "SRAM reset complete\n");
1224 return 0;
1226 } while (++count < 20); /* wait up to 0.4 sec */
1228 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
1229 return -ETIMEDOUT;
1232 static int falcon_spi_device_init(struct efx_nic *efx,
1233 struct efx_spi_device **spi_device_ret,
1234 unsigned int device_id, u32 device_type)
1236 struct efx_spi_device *spi_device;
1238 if (device_type != 0) {
1239 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1240 if (!spi_device)
1241 return -ENOMEM;
1242 spi_device->device_id = device_id;
1243 spi_device->size =
1244 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
1245 spi_device->addr_len =
1246 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
1247 spi_device->munge_address = (spi_device->size == 1 << 9 &&
1248 spi_device->addr_len == 1);
1249 spi_device->erase_command =
1250 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
1251 spi_device->erase_size =
1252 1 << SPI_DEV_TYPE_FIELD(device_type,
1253 SPI_DEV_TYPE_ERASE_SIZE);
1254 spi_device->block_size =
1255 1 << SPI_DEV_TYPE_FIELD(device_type,
1256 SPI_DEV_TYPE_BLOCK_SIZE);
1257 } else {
1258 spi_device = NULL;
1261 kfree(*spi_device_ret);
1262 *spi_device_ret = spi_device;
1263 return 0;
1266 static void falcon_remove_spi_devices(struct efx_nic *efx)
1268 kfree(efx->spi_eeprom);
1269 efx->spi_eeprom = NULL;
1270 kfree(efx->spi_flash);
1271 efx->spi_flash = NULL;
1274 /* Extract non-volatile configuration */
1275 static int falcon_probe_nvconfig(struct efx_nic *efx)
1277 struct falcon_nvconfig *nvconfig;
1278 int board_rev;
1279 int rc;
1281 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
1282 if (!nvconfig)
1283 return -ENOMEM;
1285 rc = falcon_read_nvram(efx, nvconfig);
1286 if (rc == -EINVAL) {
1287 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
1288 efx->phy_type = PHY_TYPE_NONE;
1289 efx->mdio.prtad = MDIO_PRTAD_NONE;
1290 board_rev = 0;
1291 rc = 0;
1292 } else if (rc) {
1293 goto fail1;
1294 } else {
1295 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
1296 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
1298 efx->phy_type = v2->port0_phy_type;
1299 efx->mdio.prtad = v2->port0_phy_addr;
1300 board_rev = le16_to_cpu(v2->board_revision);
1302 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1303 rc = falcon_spi_device_init(
1304 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1305 le32_to_cpu(v3->spi_device_type
1306 [FFE_AB_SPI_DEVICE_FLASH]));
1307 if (rc)
1308 goto fail2;
1309 rc = falcon_spi_device_init(
1310 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1311 le32_to_cpu(v3->spi_device_type
1312 [FFE_AB_SPI_DEVICE_EEPROM]));
1313 if (rc)
1314 goto fail2;
1318 /* Read the MAC addresses */
1319 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
1321 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1323 rc = falcon_probe_board(efx, board_rev);
1324 if (rc)
1325 goto fail2;
1327 kfree(nvconfig);
1328 return 0;
1330 fail2:
1331 falcon_remove_spi_devices(efx);
1332 fail1:
1333 kfree(nvconfig);
1334 return rc;
1337 /* Probe all SPI devices on the NIC */
1338 static void falcon_probe_spi_devices(struct efx_nic *efx)
1340 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1341 int boot_dev;
1343 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
1344 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1345 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1347 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
1348 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
1349 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
1350 EFX_LOG(efx, "Booted from %s\n",
1351 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
1352 } else {
1353 /* Disable VPD and set clock dividers to safe
1354 * values for initial programming. */
1355 boot_dev = -1;
1356 EFX_LOG(efx, "Booted from internal ASIC settings;"
1357 " setting SPI config\n");
1358 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
1359 /* 125 MHz / 7 ~= 20 MHz */
1360 FRF_AB_EE_SF_CLOCK_DIV, 7,
1361 /* 125 MHz / 63 ~= 2 MHz */
1362 FRF_AB_EE_EE_CLOCK_DIV, 63);
1363 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1366 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1367 falcon_spi_device_init(efx, &efx->spi_flash,
1368 FFE_AB_SPI_DEVICE_FLASH,
1369 default_flash_type);
1370 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1371 falcon_spi_device_init(efx, &efx->spi_eeprom,
1372 FFE_AB_SPI_DEVICE_EEPROM,
1373 large_eeprom_type);
1376 static int falcon_probe_nic(struct efx_nic *efx)
1378 struct falcon_nic_data *nic_data;
1379 struct falcon_board *board;
1380 int rc;
1382 /* Allocate storage for hardware specific data */
1383 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
1384 if (!nic_data)
1385 return -ENOMEM;
1386 efx->nic_data = nic_data;
1388 rc = -ENODEV;
1390 if (efx_nic_fpga_ver(efx) != 0) {
1391 EFX_ERR(efx, "Falcon FPGA not supported\n");
1392 goto fail1;
1395 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1396 efx_oword_t nic_stat;
1397 struct pci_dev *dev;
1398 u8 pci_rev = efx->pci_dev->revision;
1400 if ((pci_rev == 0xff) || (pci_rev == 0)) {
1401 EFX_ERR(efx, "Falcon rev A0 not supported\n");
1402 goto fail1;
1404 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1405 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
1406 EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
1407 goto fail1;
1409 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
1410 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
1411 goto fail1;
1414 dev = pci_dev_get(efx->pci_dev);
1415 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
1416 dev))) {
1417 if (dev->bus == efx->pci_dev->bus &&
1418 dev->devfn == efx->pci_dev->devfn + 1) {
1419 nic_data->pci_dev2 = dev;
1420 break;
1423 if (!nic_data->pci_dev2) {
1424 EFX_ERR(efx, "failed to find secondary function\n");
1425 rc = -ENODEV;
1426 goto fail2;
1430 /* Now we can reset the NIC */
1431 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
1432 if (rc) {
1433 EFX_ERR(efx, "failed to reset NIC\n");
1434 goto fail3;
1437 /* Allocate memory for INT_KER */
1438 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
1439 if (rc)
1440 goto fail4;
1441 BUG_ON(efx->irq_status.dma_addr & 0x0f);
1443 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
1444 (u64)efx->irq_status.dma_addr,
1445 efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
1447 falcon_probe_spi_devices(efx);
1449 /* Read in the non-volatile configuration */
1450 rc = falcon_probe_nvconfig(efx);
1451 if (rc)
1452 goto fail5;
1454 /* Initialise I2C adapter */
1455 board = falcon_board(efx);
1456 board->i2c_adap.owner = THIS_MODULE;
1457 board->i2c_data = falcon_i2c_bit_operations;
1458 board->i2c_data.data = efx;
1459 board->i2c_adap.algo_data = &board->i2c_data;
1460 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
1461 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
1462 sizeof(board->i2c_adap.name));
1463 rc = i2c_bit_add_bus(&board->i2c_adap);
1464 if (rc)
1465 goto fail5;
1467 rc = falcon_board(efx)->type->init(efx);
1468 if (rc) {
1469 EFX_ERR(efx, "failed to initialise board\n");
1470 goto fail6;
1473 nic_data->stats_disable_count = 1;
1474 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
1475 (unsigned long)efx);
1477 return 0;
1479 fail6:
1480 BUG_ON(i2c_del_adapter(&board->i2c_adap));
1481 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1482 fail5:
1483 falcon_remove_spi_devices(efx);
1484 efx_nic_free_buffer(efx, &efx->irq_status);
1485 fail4:
1486 fail3:
1487 if (nic_data->pci_dev2) {
1488 pci_dev_put(nic_data->pci_dev2);
1489 nic_data->pci_dev2 = NULL;
1491 fail2:
1492 fail1:
1493 kfree(efx->nic_data);
1494 return rc;
1497 static void falcon_init_rx_cfg(struct efx_nic *efx)
1499 /* Prior to Siena the RX DMA engine will split each frame at
1500 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
1501 * be so large that that never happens. */
1502 const unsigned huge_buf_size = (3 * 4096) >> 5;
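/* Added note: (3 * 4096) >> 5 = 384 32-byte units, i.e. 12 KB, which is
 * larger than any frame this driver will receive, so the split never
 * triggers in practice. */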
1503 /* RX control FIFO thresholds (32 entries) */
1504 const unsigned ctrl_xon_thr = 20;
1505 const unsigned ctrl_xoff_thr = 25;
1506 /* RX data FIFO thresholds (256-byte units; size varies) */
1507 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1508 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1509 efx_oword_t reg;
1511 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1512 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1513 /* Data FIFO size is 5.5K */
1514 if (data_xon_thr < 0)
1515 data_xon_thr = 512 >> 8;
1516 if (data_xoff_thr < 0)
1517 data_xoff_thr = 2048 >> 8;
1518 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1519 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1520 huge_buf_size);
1521 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
1522 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
1523 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1524 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1525 } else {
1526 /* Data FIFO size is 80K; register fields moved */
1527 if (data_xon_thr < 0)
1528 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1529 if (data_xoff_thr < 0)
1530 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1531 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1532 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1533 huge_buf_size);
1534 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
1535 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
1536 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1537 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1538 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
1540 /* Always enable XOFF signal from RX FIFO. We enable
1541 * or disable transmission of pause frames at the MAC. */
1542 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
1543 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1546 /* This call performs hardware-specific global initialisation, such as
1547 * defining the descriptor cache sizes and number of RSS channels.
1548 * It does not set up any buffers, descriptor rings or event queues.
1550 static int falcon_init_nic(struct efx_nic *efx)
1552 efx_oword_t temp;
1553 int rc;
1555 /* Use on-chip SRAM */
1556 efx_reado(efx, &temp, FR_AB_NIC_STAT);
1557 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
1558 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
1560 /* Set the source of the GMAC clock */
1561 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
1562 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
1563 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
1564 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
1567 /* Select the correct MAC */
1568 falcon_clock_mac(efx);
1570 rc = falcon_reset_sram(efx);
1571 if (rc)
1572 return rc;
1574 /* Clear the parity enables on the TX data fifos as
1575 * they produce false parity errors because of timing issues
1577 if (EFX_WORKAROUND_5129(efx)) {
1578 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
1579 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
1580 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
1583 if (EFX_WORKAROUND_7244(efx)) {
1584 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
1585 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
1586 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
1587 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
1588 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
1589 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
1592 /* XXX This is documented only for Falcon A0/A1 */
1593 /* Setup RX. Wait for descriptor is broken and must
1594 * be disabled. RXDP recovery shouldn't be needed, but is.
1596 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
1597 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
1598 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
1599 if (EFX_WORKAROUND_5583(efx))
1600 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
1601 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
1603 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
1604 * descriptors (which is bad).
1606 efx_reado(efx, &temp, FR_AZ_TX_CFG);
1607 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
1608 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
1610 falcon_init_rx_cfg(efx);
1612 /* Set destination of both TX and RX Flush events */
1613 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1614 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
1615 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
1618 efx_nic_init_common(efx);
1620 return 0;
1623 static void falcon_remove_nic(struct efx_nic *efx)
1625 struct falcon_nic_data *nic_data = efx->nic_data;
1626 struct falcon_board *board = falcon_board(efx);
1627 int rc;
1629 board->type->fini(efx);
1631 /* Remove I2C adapter and clear it in preparation for a retry */
1632 rc = i2c_del_adapter(&board->i2c_adap);
1633 BUG_ON(rc);
1634 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1636 falcon_remove_spi_devices(efx);
1637 efx_nic_free_buffer(efx, &efx->irq_status);
1639 falcon_reset_hw(efx, RESET_TYPE_ALL);
1641 /* Release the second function after the reset */
1642 if (nic_data->pci_dev2) {
1643 pci_dev_put(nic_data->pci_dev2);
1644 nic_data->pci_dev2 = NULL;
1647 /* Tear down the private nic state */
1648 kfree(efx->nic_data);
1649 efx->nic_data = NULL;
1652 static void falcon_update_nic_stats(struct efx_nic *efx)
1654 struct falcon_nic_data *nic_data = efx->nic_data;
1655 efx_oword_t cnt;
1657 if (nic_data->stats_disable_count)
1658 return;
1660 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
1661 efx->n_rx_nodesc_drop_cnt +=
1662 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
1664 if (nic_data->stats_pending &&
1665 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
1666 nic_data->stats_pending = false;
1667 rmb(); /* read the done flag before the stats */
1668 efx->mac_op->update_stats(efx);
1672 void falcon_start_nic_stats(struct efx_nic *efx)
1674 struct falcon_nic_data *nic_data = efx->nic_data;
1676 spin_lock_bh(&efx->stats_lock);
1677 if (--nic_data->stats_disable_count == 0)
1678 falcon_stats_request(efx);
1679 spin_unlock_bh(&efx->stats_lock);
1682 void falcon_stop_nic_stats(struct efx_nic *efx)
1684 struct falcon_nic_data *nic_data = efx->nic_data;
1685 int i;
1687 might_sleep();
1689 spin_lock_bh(&efx->stats_lock);
1690 ++nic_data->stats_disable_count;
1691 spin_unlock_bh(&efx->stats_lock);
1693 del_timer_sync(&nic_data->stats_timer);
1695 /* Wait enough time for the most recent transfer to
1696 * complete. */
1697 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
1698 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
1699 break;
1700 msleep(1);
1703 spin_lock_bh(&efx->stats_lock);
1704 falcon_stats_complete(efx);
1705 spin_unlock_bh(&efx->stats_lock);
1708 static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1710 falcon_board(efx)->type->set_id_led(efx, mode);
1713 /**************************************************************************
1715 * Wake on LAN
1717 **************************************************************************
1720 static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1722 wol->supported = 0;
1723 wol->wolopts = 0;
1724 memset(&wol->sopass, 0, sizeof(wol->sopass));
1727 static int falcon_set_wol(struct efx_nic *efx, u32 type)
1729 if (type != 0)
1730 return -EINVAL;
1731 return 0;
1734 /**************************************************************************
1736 * Revision-dependent attributes used by efx.c and nic.c
1738 **************************************************************************
1741 struct efx_nic_type falcon_a1_nic_type = {
1742 .probe = falcon_probe_nic,
1743 .remove = falcon_remove_nic,
1744 .init = falcon_init_nic,
1745 .fini = efx_port_dummy_op_void,
1746 .monitor = falcon_monitor,
1747 .reset = falcon_reset_hw,
1748 .probe_port = falcon_probe_port,
1749 .remove_port = falcon_remove_port,
1750 .prepare_flush = falcon_prepare_flush,
1751 .update_stats = falcon_update_nic_stats,
1752 .start_stats = falcon_start_nic_stats,
1753 .stop_stats = falcon_stop_nic_stats,
1754 .set_id_led = falcon_set_id_led,
1755 .push_irq_moderation = falcon_push_irq_moderation,
1756 .push_multicast_hash = falcon_push_multicast_hash,
1757 .reconfigure_port = falcon_reconfigure_port,
1758 .get_wol = falcon_get_wol,
1759 .set_wol = falcon_set_wol,
1760 .resume_wol = efx_port_dummy_op_void,
1761 .test_nvram = falcon_test_nvram,
1762 .default_mac_ops = &falcon_xmac_operations,
1764 .revision = EFX_REV_FALCON_A1,
1765 .mem_map_size = 0x20000,
1766 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
1767 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
1768 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
1769 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
1770 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
1771 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1772 .rx_buffer_padding = 0x24,
1773 .max_interrupt_mode = EFX_INT_MODE_MSI,
1774 .phys_addr_channels = 4,
1775 .tx_dc_base = 0x130000,
1776 .rx_dc_base = 0x100000,
1777 .offload_features = NETIF_F_IP_CSUM,
1778 .reset_world_flags = ETH_RESET_IRQ,
1781 struct efx_nic_type falcon_b0_nic_type = {
1782 .probe = falcon_probe_nic,
1783 .remove = falcon_remove_nic,
1784 .init = falcon_init_nic,
1785 .fini = efx_port_dummy_op_void,
1786 .monitor = falcon_monitor,
1787 .reset = falcon_reset_hw,
1788 .probe_port = falcon_probe_port,
1789 .remove_port = falcon_remove_port,
1790 .prepare_flush = falcon_prepare_flush,
1791 .update_stats = falcon_update_nic_stats,
1792 .start_stats = falcon_start_nic_stats,
1793 .stop_stats = falcon_stop_nic_stats,
1794 .set_id_led = falcon_set_id_led,
1795 .push_irq_moderation = falcon_push_irq_moderation,
1796 .push_multicast_hash = falcon_push_multicast_hash,
1797 .reconfigure_port = falcon_reconfigure_port,
1798 .get_wol = falcon_get_wol,
1799 .set_wol = falcon_set_wol,
1800 .resume_wol = efx_port_dummy_op_void,
1801 .test_registers = falcon_b0_test_registers,
1802 .test_nvram = falcon_test_nvram,
1803 .default_mac_ops = &falcon_xmac_operations,
1805 .revision = EFX_REV_FALCON_B0,
1806 /* Map everything up to and including the RSS indirection
1807 * table. Don't map MSI-X table, MSI-X PBA since Linux
1808 * requires that they not be mapped. */
1809 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
1810 FR_BZ_RX_INDIRECTION_TBL_STEP *
1811 FR_BZ_RX_INDIRECTION_TBL_ROWS),
1812 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
1813 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
1814 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
1815 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
1816 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
1817 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1818 .rx_buffer_padding = 0,
1819 .max_interrupt_mode = EFX_INT_MODE_MSIX,
1820 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
1821 * interrupt handler only supports 32
1822 * channels */
1823 .tx_dc_base = 0x130000,
1824 .rx_dc_base = 0x100000,
1825 .offload_features = NETIF_F_IP_CSUM,
1826 .reset_world_flags = ETH_RESET_IRQ,