1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <linux/seq_file.h>
16 #include "net_driver.h"
17 #include "bitfield.h"
18 #include "efx.h"
19 #include "nic.h"
20 #include "regs.h"
21 #include "io.h"
22 #include "workarounds.h"
24 /**************************************************************************
26 * Configurable values
28 **************************************************************************
31 /* This is set to 16 for a good reason. In summary, if larger than
32 * 16, the descriptor cache holds more than a default socket
33 * buffer's worth of packets (for UDP we can have at most one
34 * socket buffer's worth outstanding). Combined with the fact that
35 * we only get one TX event per descriptor cache, this means the
36 * NIC goes idle.
38 #define TX_DC_ENTRIES 16
39 #define TX_DC_ENTRIES_ORDER 1
41 #define RX_DC_ENTRIES 64
42 #define RX_DC_ENTRIES_ORDER 3
44 /* If EFX_MAX_INT_ERRORS internal errors occur within
45 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
46 * disable it.
48 #define EFX_INT_ERROR_EXPIRE 3600
49 #define EFX_MAX_INT_ERRORS 5
51 /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
53 #define EFX_FLUSH_INTERVAL 10
54 #define EFX_FLUSH_POLL_COUNT 100
56 /* Size and alignment of special buffers (4KB) */
57 #define EFX_BUF_SIZE 4096
59 /* Depth of RX flush request fifo */
60 #define EFX_RX_FLUSH_COUNT 4
62 /* Generated event code for efx_nic_generate_test_event() */
63 #define EFX_CHANNEL_MAGIC_TEST(_channel) \
64 (0x00010100 + (_channel)->channel)
66 /* Generated event code for efx_nic_generate_fill_event() */
67 #define EFX_CHANNEL_MAGIC_FILL(_channel) \
68 (0x00010200 + (_channel)->channel)
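/* For example, for channel 3 these expand to 0x00010103 (test) and
 * 0x00010203 (fill); efx_handle_generated_event() below matches the
 * magic field of a received driver-generated event against them. */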
70 /**************************************************************************
72 * Solarstorm hardware access
74 **************************************************************************/
76 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
77 unsigned int index)
79 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
80 value, index);
83 /* Read the current event from the event queue */
84 static inline efx_qword_t *efx_event(struct efx_channel *channel,
85 unsigned int index)
87 return ((efx_qword_t *) (channel->eventq.addr)) +
88 (index & channel->eventq_mask);
91 /* See if an event is present
93 * We check both the high and low dword of the event for all ones. We
94 * wrote all ones when we cleared the event, and no valid event can
95 * have all ones in either its high or low dwords. This approach is
96 * robust against reordering.
98 * Note that using a single 64-bit comparison is incorrect; even
99 * though the CPU read will be atomic, the DMA write may not be.
101 static inline int efx_event_present(efx_qword_t *event)
103 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
104 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
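/* Illustrative sketch, not part of the driver: how a consumer might
 * peek at the next event slot with the two helpers above.  The helper
 * name example_peek_event() is hypothetical. */
static efx_qword_t *example_peek_event(struct efx_channel *channel)
{
	efx_qword_t *event = efx_event(channel, channel->eventq_read_ptr);

	/* Present only if neither dword shows the all-ones pattern
	 * written when an event is cleared */
	return efx_event_present(event) ? event : NULL;
}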
107 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
108 const efx_oword_t *mask)
110 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
111 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
114 int efx_nic_test_registers(struct efx_nic *efx,
115 const struct efx_nic_register_test *regs,
116 size_t n_regs)
118 unsigned address = 0, i, j;
119 efx_oword_t mask, imask, original, reg, buf;
121 /* Falcon should be in loopback to isolate the XMAC from the PHY */
122 WARN_ON(!LOOPBACK_INTERNAL(efx));
124 for (i = 0; i < n_regs; ++i) {
125 address = regs[i].address;
126 mask = imask = regs[i].mask;
127 EFX_INVERT_OWORD(imask);
129 efx_reado(efx, &original, address);
131 /* bit sweep on and off */
132 for (j = 0; j < 128; j++) {
133 if (!EFX_EXTRACT_OWORD32(mask, j, j))
134 continue;
136 /* Test this testable bit can be set in isolation */
137 EFX_AND_OWORD(reg, original, mask);
138 EFX_SET_OWORD32(reg, j, j, 1);
140 efx_writeo(efx, &reg, address);
141 efx_reado(efx, &buf, address);
143 if (efx_masked_compare_oword(&reg, &buf, &mask))
144 goto fail;
146 /* Test this testable bit can be cleared in isolation */
147 EFX_OR_OWORD(reg, original, mask);
148 EFX_SET_OWORD32(reg, j, j, 0);
150 efx_writeo(efx, &reg, address);
151 efx_reado(efx, &buf, address);
153 if (efx_masked_compare_oword(&reg, &buf, &mask))
154 goto fail;
157 efx_writeo(efx, &original, address);
160 return 0;
162 fail:
163 netif_err(efx, hw, efx->net_dev,
164 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
165 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
166 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
167 return -EIO;
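/* Usage sketch, loosely modelled on the per-board tables in falcon.c;
 * the single entry and its mask below are placeholders rather than a
 * real self-test vector. */
static const struct efx_nic_register_test example_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
};

static int example_run_register_test(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, example_register_tests,
				      ARRAY_SIZE(example_register_tests));
}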
170 /**************************************************************************
172 * Special buffer handling
173 * Special buffers are used for event queues and the TX and RX
174 * descriptor rings.
176 *************************************************************************/
179 * Initialise a special buffer
181 * This will define a buffer (previously allocated via
182 * efx_alloc_special_buffer()) in the buffer table, allowing
183 * it to be used for event queues, descriptor rings etc.
185 static void
186 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
188 efx_qword_t buf_desc;
189 int index;
190 dma_addr_t dma_addr;
191 int i;
193 EFX_BUG_ON_PARANOID(!buffer->addr);
195 /* Write buffer descriptors to NIC */
196 for (i = 0; i < buffer->entries; i++) {
197 index = buffer->index + i;
198 dma_addr = buffer->dma_addr + (i * 4096);
199 netif_dbg(efx, probe, efx->net_dev,
200 "mapping special buffer %d at %llx\n",
201 index, (unsigned long long)dma_addr);
202 EFX_POPULATE_QWORD_3(buf_desc,
203 FRF_AZ_BUF_ADR_REGION, 0,
204 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
205 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
206 efx_write_buf_tbl(efx, &buf_desc, index);
210 /* Unmaps a buffer and clears the buffer table entries */
211 static void
212 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
214 efx_oword_t buf_tbl_upd;
215 unsigned int start = buffer->index;
216 unsigned int end = (buffer->index + buffer->entries - 1);
218 if (!buffer->entries)
219 return;
221 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
222 buffer->index, buffer->index + buffer->entries - 1);
224 EFX_POPULATE_OWORD_4(buf_tbl_upd,
225 FRF_AZ_BUF_UPD_CMD, 0,
226 FRF_AZ_BUF_CLR_CMD, 1,
227 FRF_AZ_BUF_CLR_END_ID, end,
228 FRF_AZ_BUF_CLR_START_ID, start);
229 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
233 * Allocate a new special buffer
235 * This allocates memory for a new buffer, clears it and allocates a
236 * new buffer ID range. It does not write into the buffer table.
238 * This call will allocate 4KB buffers, since 8KB buffers can't be
239 * used for event queues and descriptor rings.
241 static int efx_alloc_special_buffer(struct efx_nic *efx,
242 struct efx_special_buffer *buffer,
243 unsigned int len)
245 len = ALIGN(len, EFX_BUF_SIZE);
247 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
248 &buffer->dma_addr, GFP_KERNEL);
249 if (!buffer->addr)
250 return -ENOMEM;
251 buffer->len = len;
252 buffer->entries = len / EFX_BUF_SIZE;
253 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
255 /* All zeros is a potentially valid event so memset to 0xff */
256 memset(buffer->addr, 0xff, len);
258 /* Select new buffer ID */
259 buffer->index = efx->next_buffer_table;
260 efx->next_buffer_table += buffer->entries;
262 netif_dbg(efx, probe, efx->net_dev,
263 "allocating special buffers %d-%d at %llx+%x "
264 "(virt %p phys %llx)\n", buffer->index,
265 buffer->index + buffer->entries - 1,
266 (u64)buffer->dma_addr, len,
267 buffer->addr, (u64)virt_to_phys(buffer->addr));
269 return 0;
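/* Sketch of the usual allocate-then-pin pairing (hypothetical helper;
 * the real callers are efx_nic_probe_tx/rx/eventq below, which defer
 * the pinning to their init counterparts): */
static int example_probe_ring(struct efx_nic *efx,
			      struct efx_special_buffer *ring,
			      unsigned int entries)
{
	int rc;

	/* Reserve DMA-coherent memory and a contiguous buffer ID range */
	rc = efx_alloc_special_buffer(efx, ring,
				      entries * sizeof(efx_qword_t));
	if (rc)
		return rc;

	/* At init time, pin the pages into the buffer table */
	efx_init_special_buffer(efx, ring);
	return 0;
}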
272 static void
273 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
275 if (!buffer->addr)
276 return;
278 netif_dbg(efx, hw, efx->net_dev,
279 "deallocating special buffers %d-%d at %llx+%x "
280 "(virt %p phys %llx)\n", buffer->index,
281 buffer->index + buffer->entries - 1,
282 (u64)buffer->dma_addr, buffer->len,
283 buffer->addr, (u64)virt_to_phys(buffer->addr));
285 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
286 buffer->dma_addr);
287 buffer->addr = NULL;
288 buffer->entries = 0;
291 /**************************************************************************
293 * Generic buffer handling
294 * These buffers are used for interrupt status and MAC stats
296 **************************************************************************/
298 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
299 unsigned int len)
301 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
302 &buffer->dma_addr);
303 if (!buffer->addr)
304 return -ENOMEM;
305 buffer->len = len;
306 memset(buffer->addr, 0, len);
307 return 0;
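/* Sketch, not from the driver, of the typical pairing for a small
 * DMA-coherent scratch area such as the interrupt status word: */
static int example_probe_irq_status(struct efx_nic *efx)
{
	/* Zeroed on allocation; released with efx_nic_free_buffer()
	 * during teardown */
	return efx_nic_alloc_buffer(efx, &efx->irq_status,
				    sizeof(efx_oword_t));
}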
310 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
312 if (buffer->addr) {
313 pci_free_consistent(efx->pci_dev, buffer->len,
314 buffer->addr, buffer->dma_addr);
315 buffer->addr = NULL;
319 /**************************************************************************
321 * TX path
323 **************************************************************************/
325 /* Returns a pointer to the specified transmit descriptor in the TX
326 * descriptor queue belonging to the specified channel.
328 static inline efx_qword_t *
329 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
331 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
334 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
335 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
337 unsigned write_ptr;
338 efx_dword_t reg;
340 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
341 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
342 efx_writed_page(tx_queue->efx, &reg,
343 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
346 /* Write pointer and first descriptor for TX descriptor ring */
347 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
348 const efx_qword_t *txd)
350 unsigned write_ptr;
351 efx_oword_t reg;
353 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
354 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
356 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
357 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
358 FRF_AZ_TX_DESC_WPTR, write_ptr);
359 reg.qword[0] = *txd;
360 efx_writeo_page(tx_queue->efx, &reg,
361 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
364 static inline bool
365 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
367 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
369 if (empty_read_count == 0)
370 return false;
372 tx_queue->empty_read_count = 0;
373 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
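/* For context, a sketch of the completion side this pairs with (the
 * real code lives in tx.c and may differ in detail): when the reader
 * observes the queue go empty it records
 *
 *	smp_mb();
 *	tx_queue->empty_read_count =
 *		tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
 *
 * so the XOR above matches only if no descriptors have been written
 * since the queue was last observed empty. */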
376 /* For each entry inserted into the software descriptor ring, create a
377 * descriptor in the hardware TX descriptor ring (in host memory), and
378 * write a doorbell.
380 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
383 struct efx_tx_buffer *buffer;
384 efx_qword_t *txd;
385 unsigned write_ptr;
386 unsigned old_write_count = tx_queue->write_count;
388 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
390 do {
391 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
392 buffer = &tx_queue->buffer[write_ptr];
393 txd = efx_tx_desc(tx_queue, write_ptr);
394 ++tx_queue->write_count;
396 /* Create TX descriptor ring entry */
397 EFX_POPULATE_QWORD_4(*txd,
398 FSF_AZ_TX_KER_CONT, buffer->continuation,
399 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
400 FSF_AZ_TX_KER_BUF_REGION, 0,
401 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
402 } while (tx_queue->write_count != tx_queue->insert_count);
404 wmb(); /* Ensure descriptors are written before they are fetched */
406 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
407 txd = efx_tx_desc(tx_queue,
408 old_write_count & tx_queue->ptr_mask);
409 efx_push_tx_desc(tx_queue, txd);
410 ++tx_queue->pushes;
411 } else {
412 efx_notify_tx_desc(tx_queue);
416 /* Allocate hardware resources for a TX queue */
417 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
419 struct efx_nic *efx = tx_queue->efx;
420 unsigned entries;
422 entries = tx_queue->ptr_mask + 1;
423 return efx_alloc_special_buffer(efx, &tx_queue->txd,
424 entries * sizeof(efx_qword_t));
427 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
429 struct efx_nic *efx = tx_queue->efx;
430 efx_oword_t reg;
432 tx_queue->flushed = FLUSH_NONE;
434 /* Pin TX descriptor ring */
435 efx_init_special_buffer(efx, &tx_queue->txd);
437 /* Push TX descriptor ring to card */
438 EFX_POPULATE_OWORD_10(reg,
439 FRF_AZ_TX_DESCQ_EN, 1,
440 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
441 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
442 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
443 FRF_AZ_TX_DESCQ_EVQ_ID,
444 tx_queue->channel->channel,
445 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
446 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
447 FRF_AZ_TX_DESCQ_SIZE,
448 __ffs(tx_queue->txd.entries),
449 FRF_AZ_TX_DESCQ_TYPE, 0,
450 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
452 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
453 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
454 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
455 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
456 !csum);
459 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
460 tx_queue->queue);
462 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
463 /* Only 128 bits in this register */
464 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
466 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
467 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
468 clear_bit_le(tx_queue->queue, (void *)&reg);
469 else
470 set_bit_le(tx_queue->queue, (void *)&reg);
471 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
474 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
475 EFX_POPULATE_OWORD_1(reg,
476 FRF_BZ_TX_PACE,
477 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
478 FFE_BZ_TX_PACE_OFF :
479 FFE_BZ_TX_PACE_RESERVED);
480 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
481 tx_queue->queue);
485 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
487 struct efx_nic *efx = tx_queue->efx;
488 efx_oword_t tx_flush_descq;
490 tx_queue->flushed = FLUSH_PENDING;
492 /* Post a flush command */
493 EFX_POPULATE_OWORD_2(tx_flush_descq,
494 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
495 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
496 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
499 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
501 struct efx_nic *efx = tx_queue->efx;
502 efx_oword_t tx_desc_ptr;
504 /* The queue should have been flushed */
505 WARN_ON(tx_queue->flushed != FLUSH_DONE);
507 /* Remove TX descriptor ring from card */
508 EFX_ZERO_OWORD(tx_desc_ptr);
509 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
510 tx_queue->queue);
512 /* Unpin TX descriptor ring */
513 efx_fini_special_buffer(efx, &tx_queue->txd);
516 /* Free buffers backing TX queue */
517 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
519 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
522 /**************************************************************************
524 * RX path
526 **************************************************************************/
528 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
529 static inline efx_qword_t *
530 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
532 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
535 /* This creates an entry in the RX descriptor queue */
536 static inline void
537 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
539 struct efx_rx_buffer *rx_buf;
540 efx_qword_t *rxd;
542 rxd = efx_rx_desc(rx_queue, index);
543 rx_buf = efx_rx_buffer(rx_queue, index);
544 EFX_POPULATE_QWORD_3(*rxd,
545 FSF_AZ_RX_KER_BUF_SIZE,
546 rx_buf->len -
547 rx_queue->efx->type->rx_buffer_padding,
548 FSF_AZ_RX_KER_BUF_REGION, 0,
549 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
552 /* This writes to the RX_DESC_WPTR register for the specified receive
553 * descriptor ring.
555 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
557 struct efx_nic *efx = rx_queue->efx;
558 efx_dword_t reg;
559 unsigned write_ptr;
561 while (rx_queue->notified_count != rx_queue->added_count) {
562 efx_build_rx_desc(
563 rx_queue,
564 rx_queue->notified_count & rx_queue->ptr_mask);
565 ++rx_queue->notified_count;
568 wmb();
569 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
570 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
571 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
572 efx_rx_queue_index(rx_queue));
575 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
577 struct efx_nic *efx = rx_queue->efx;
578 unsigned entries;
580 entries = rx_queue->ptr_mask + 1;
581 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
582 entries * sizeof(efx_qword_t));
585 void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
587 efx_oword_t rx_desc_ptr;
588 struct efx_nic *efx = rx_queue->efx;
589 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
590 bool iscsi_digest_en = is_b0;
592 netif_dbg(efx, hw, efx->net_dev,
593 "RX queue %d ring in special buffers %d-%d\n",
594 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
595 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
597 rx_queue->flushed = FLUSH_NONE;
599 /* Pin RX descriptor ring */
600 efx_init_special_buffer(efx, &rx_queue->rxd);
602 /* Push RX descriptor ring to card */
603 EFX_POPULATE_OWORD_10(rx_desc_ptr,
604 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
605 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
606 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
607 FRF_AZ_RX_DESCQ_EVQ_ID,
608 efx_rx_queue_channel(rx_queue)->channel,
609 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
610 FRF_AZ_RX_DESCQ_LABEL,
611 efx_rx_queue_index(rx_queue),
612 FRF_AZ_RX_DESCQ_SIZE,
613 __ffs(rx_queue->rxd.entries),
614 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
615 /* For >=B0 this is scatter so disable */
616 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
617 FRF_AZ_RX_DESCQ_EN, 1);
618 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
619 efx_rx_queue_index(rx_queue));
622 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
624 struct efx_nic *efx = rx_queue->efx;
625 efx_oword_t rx_flush_descq;
627 rx_queue->flushed = FLUSH_PENDING;
629 /* Post a flush command */
630 EFX_POPULATE_OWORD_2(rx_flush_descq,
631 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
632 FRF_AZ_RX_FLUSH_DESCQ,
633 efx_rx_queue_index(rx_queue));
634 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
637 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
639 efx_oword_t rx_desc_ptr;
640 struct efx_nic *efx = rx_queue->efx;
642 /* The queue should already have been flushed */
643 WARN_ON(rx_queue->flushed != FLUSH_DONE);
645 /* Remove RX descriptor ring from card */
646 EFX_ZERO_OWORD(rx_desc_ptr);
647 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
648 efx_rx_queue_index(rx_queue));
650 /* Unpin RX descriptor ring */
651 efx_fini_special_buffer(efx, &rx_queue->rxd);
654 /* Free buffers backing RX queue */
655 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
657 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
660 /**************************************************************************
662 * Event queue processing
663 * Event queues are processed by per-channel tasklets.
665 **************************************************************************/
667 /* Update a channel's event queue's read pointer (RPTR) register
669 * This writes the EVQ_RPTR_REG register for the specified channel's
670 * event queue.
672 void efx_nic_eventq_read_ack(struct efx_channel *channel)
674 efx_dword_t reg;
675 struct efx_nic *efx = channel->efx;
677 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
678 channel->eventq_read_ptr & channel->eventq_mask);
679 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
680 channel->channel);
683 /* Use HW to insert a SW defined event */
684 static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
686 efx_oword_t drv_ev_reg;
688 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
689 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
690 drv_ev_reg.u32[0] = event->u32[0];
691 drv_ev_reg.u32[1] = event->u32[1];
692 drv_ev_reg.u32[2] = 0;
693 drv_ev_reg.u32[3] = 0;
694 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
695 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
698 /* Handle a transmit completion event
700 * The NIC batches TX completion events; the message we receive is of
701 * the form "complete all TX events up to this index".
703 static int
704 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
706 unsigned int tx_ev_desc_ptr;
707 unsigned int tx_ev_q_label;
708 struct efx_tx_queue *tx_queue;
709 struct efx_nic *efx = channel->efx;
710 int tx_packets = 0;
712 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
713 /* Transmit completion */
714 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
715 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
716 tx_queue = efx_channel_get_tx_queue(
717 channel, tx_ev_q_label % EFX_TXQ_TYPES);
718 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
719 tx_queue->ptr_mask);
720 channel->irq_mod_score += tx_packets;
721 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
722 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
723 /* Rewrite the FIFO write pointer */
724 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
725 tx_queue = efx_channel_get_tx_queue(
726 channel, tx_ev_q_label % EFX_TXQ_TYPES);
728 if (efx_dev_registered(efx))
729 netif_tx_lock(efx->net_dev);
730 efx_notify_tx_desc(tx_queue);
731 if (efx_dev_registered(efx))
732 netif_tx_unlock(efx->net_dev);
733 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
734 EFX_WORKAROUND_10727(efx)) {
735 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
736 } else {
737 netif_err(efx, tx_err, efx->net_dev,
738 "channel %d unexpected TX event "
739 EFX_QWORD_FMT"\n", channel->channel,
740 EFX_QWORD_VAL(*event));
743 return tx_packets;
746 /* Detect errors included in the rx_ev_pkt_ok bit. */
747 static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
748 const efx_qword_t *event,
749 bool *rx_ev_pkt_ok,
750 bool *discard)
752 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
753 struct efx_nic *efx = rx_queue->efx;
754 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
755 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
756 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
757 bool rx_ev_other_err, rx_ev_pause_frm;
758 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
759 unsigned rx_ev_pkt_type;
761 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
762 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
763 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
764 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
765 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
766 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
767 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
768 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
769 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
770 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
771 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
772 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
773 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
774 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
775 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
777 /* Every error apart from tobe_disc and pause_frm */
778 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
779 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
780 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
782 /* Count errors that are not in MAC stats. Ignore expected
783 * checksum errors during self-test. */
784 if (rx_ev_frm_trunc)
785 ++channel->n_rx_frm_trunc;
786 else if (rx_ev_tobe_disc)
787 ++channel->n_rx_tobe_disc;
788 else if (!efx->loopback_selftest) {
789 if (rx_ev_ip_hdr_chksum_err)
790 ++channel->n_rx_ip_hdr_chksum_err;
791 else if (rx_ev_tcp_udp_chksum_err)
792 ++channel->n_rx_tcp_udp_chksum_err;
795 /* The frame must be discarded if any of these are true. */
796 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
797 rx_ev_tobe_disc | rx_ev_pause_frm);
799 /* TOBE_DISC is expected on unicast mismatches; don't print out an
800 * error message. FRM_TRUNC indicates RXDP dropped the packet due
801 * to a FIFO overflow.
803 #ifdef EFX_ENABLE_DEBUG
804 if (rx_ev_other_err && net_ratelimit()) {
805 netif_dbg(efx, rx_err, efx->net_dev,
806 " RX queue %d unexpected RX event "
807 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
808 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
809 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
810 rx_ev_ip_hdr_chksum_err ?
811 " [IP_HDR_CHKSUM_ERR]" : "",
812 rx_ev_tcp_udp_chksum_err ?
813 " [TCP_UDP_CHKSUM_ERR]" : "",
814 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
815 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
816 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
817 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
818 rx_ev_pause_frm ? " [PAUSE]" : "");
820 #endif
823 /* Handle receive events that are not in-order. */
824 static void
825 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
827 struct efx_nic *efx = rx_queue->efx;
828 unsigned expected, dropped;
830 expected = rx_queue->removed_count & rx_queue->ptr_mask;
831 dropped = (index - expected) & rx_queue->ptr_mask;
832 netif_info(efx, rx_err, efx->net_dev,
833 "dropped %d events (index=%d expected=%d)\n",
834 dropped, index, expected);
836 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
837 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
840 /* Handle a packet received event
842 * The NIC gives a "discard" flag if it's a unicast packet with the
843 * wrong destination address.
844 * The "is multicast" and "matches multicast filter" flags can also
845 * be used to discard non-matching multicast packets.
847 static void
848 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
850 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
851 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
852 unsigned expected_ptr;
853 bool rx_ev_pkt_ok, discard = false, checksummed;
854 struct efx_rx_queue *rx_queue;
856 /* Basic packet information */
857 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
858 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
859 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
860 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
861 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
862 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
863 channel->channel);
865 rx_queue = efx_channel_get_rx_queue(channel);
867 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
868 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
869 if (unlikely(rx_ev_desc_ptr != expected_ptr))
870 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
872 if (likely(rx_ev_pkt_ok)) {
873 /* If packet is marked as OK and packet type is TCP/IP or
874 * UDP/IP, then we can rely on the hardware checksum.
876 checksummed =
877 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
878 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
879 } else {
880 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
881 checksummed = false;
884 /* Detect multicast packets that didn't match the filter */
885 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
886 if (rx_ev_mcast_pkt) {
887 unsigned int rx_ev_mcast_hash_match =
888 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
890 if (unlikely(!rx_ev_mcast_hash_match)) {
891 ++channel->n_rx_mcast_mismatch;
892 discard = true;
896 channel->irq_mod_score += 2;
898 /* Handle received packet */
899 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
900 checksummed, discard);
903 static void
904 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
906 struct efx_nic *efx = channel->efx;
907 unsigned code;
909 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
910 if (code == EFX_CHANNEL_MAGIC_TEST(channel))
911 ; /* ignore */
912 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
913 /* The queue must be empty, so we won't receive any rx
914 * events, so efx_process_channel() won't refill the
915 * queue. Refill it here */
916 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
917 else
918 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
919 "generated event "EFX_QWORD_FMT"\n",
920 channel->channel, EFX_QWORD_VAL(*event));
923 static void
924 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
926 struct efx_nic *efx = channel->efx;
927 unsigned int ev_sub_code;
928 unsigned int ev_sub_data;
930 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
931 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
933 switch (ev_sub_code) {
934 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
935 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
936 channel->channel, ev_sub_data);
937 break;
938 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
939 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
940 channel->channel, ev_sub_data);
941 break;
942 case FSE_AZ_EVQ_INIT_DONE_EV:
943 netif_dbg(efx, hw, efx->net_dev,
944 "channel %d EVQ %d initialised\n",
945 channel->channel, ev_sub_data);
946 break;
947 case FSE_AZ_SRM_UPD_DONE_EV:
948 netif_vdbg(efx, hw, efx->net_dev,
949 "channel %d SRAM update done\n", channel->channel);
950 break;
951 case FSE_AZ_WAKE_UP_EV:
952 netif_vdbg(efx, hw, efx->net_dev,
953 "channel %d RXQ %d wakeup event\n",
954 channel->channel, ev_sub_data);
955 break;
956 case FSE_AZ_TIMER_EV:
957 netif_vdbg(efx, hw, efx->net_dev,
958 "channel %d RX queue %d timer expired\n",
959 channel->channel, ev_sub_data);
960 break;
961 case FSE_AA_RX_RECOVER_EV:
962 netif_err(efx, rx_err, efx->net_dev,
963 "channel %d seen DRIVER RX_RESET event. "
964 "Resetting.\n", channel->channel);
965 atomic_inc(&efx->rx_reset);
966 efx_schedule_reset(efx,
967 EFX_WORKAROUND_6555(efx) ?
968 RESET_TYPE_RX_RECOVERY :
969 RESET_TYPE_DISABLE);
970 break;
971 case FSE_BZ_RX_DSC_ERROR_EV:
972 netif_err(efx, rx_err, efx->net_dev,
973 "RX DMA Q %d reports descriptor fetch error."
974 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
975 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
976 break;
977 case FSE_BZ_TX_DSC_ERROR_EV:
978 netif_err(efx, tx_err, efx->net_dev,
979 "TX DMA Q %d reports descriptor fetch error."
980 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
981 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
982 break;
983 default:
984 netif_vdbg(efx, hw, efx->net_dev,
985 "channel %d unknown driver event code %d "
986 "data %04x\n", channel->channel, ev_sub_code,
987 ev_sub_data);
988 break;
992 int efx_nic_process_eventq(struct efx_channel *channel, int budget)
994 struct efx_nic *efx = channel->efx;
995 unsigned int read_ptr;
996 efx_qword_t event, *p_event;
997 int ev_code;
998 int tx_packets = 0;
999 int spent = 0;
1001 read_ptr = channel->eventq_read_ptr;
1003 for (;;) {
1004 p_event = efx_event(channel, read_ptr);
1005 event = *p_event;
1007 if (!efx_event_present(&event))
1008 /* End of events */
1009 break;
1011 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1012 "channel %d event is "EFX_QWORD_FMT"\n",
1013 channel->channel, EFX_QWORD_VAL(event));
1015 /* Clear this event by marking it all ones */
1016 EFX_SET_QWORD(*p_event);
1018 ++read_ptr;
1020 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1022 switch (ev_code) {
1023 case FSE_AZ_EV_CODE_RX_EV:
1024 efx_handle_rx_event(channel, &event);
1025 if (++spent == budget)
1026 goto out;
1027 break;
1028 case FSE_AZ_EV_CODE_TX_EV:
1029 tx_packets += efx_handle_tx_event(channel, &event);
1030 if (tx_packets > efx->txq_entries) {
1031 spent = budget;
1032 goto out;
1034 break;
1035 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1036 efx_handle_generated_event(channel, &event);
1037 break;
1038 case FSE_AZ_EV_CODE_DRIVER_EV:
1039 efx_handle_driver_event(channel, &event);
1040 break;
1041 case FSE_CZ_EV_CODE_MCDI_EV:
1042 efx_mcdi_process_event(channel, &event);
1043 break;
1044 case FSE_AZ_EV_CODE_GLOBAL_EV:
1045 if (efx->type->handle_global_event &&
1046 efx->type->handle_global_event(channel, &event))
1047 break;
1048 /* else fall through */
1049 default:
1050 netif_err(channel->efx, hw, channel->efx->net_dev,
1051 "channel %d unknown event type %d (data "
1052 EFX_QWORD_FMT ")\n", channel->channel,
1053 ev_code, EFX_QWORD_VAL(event));
1057 out:
1058 channel->eventq_read_ptr = read_ptr;
1059 return spent;
1062 /* Check whether an event is present in the eventq at the current
1063 * read pointer. Only useful for self-test.
1065 bool efx_nic_event_present(struct efx_channel *channel)
1067 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1070 /* Allocate buffer table entries for event queue */
1071 int efx_nic_probe_eventq(struct efx_channel *channel)
1073 struct efx_nic *efx = channel->efx;
1074 unsigned entries;
1076 entries = channel->eventq_mask + 1;
1077 return efx_alloc_special_buffer(efx, &channel->eventq,
1078 entries * sizeof(efx_qword_t));
1081 void efx_nic_init_eventq(struct efx_channel *channel)
1083 efx_oword_t reg;
1084 struct efx_nic *efx = channel->efx;
1086 netif_dbg(efx, hw, efx->net_dev,
1087 "channel %d event queue in special buffers %d-%d\n",
1088 channel->channel, channel->eventq.index,
1089 channel->eventq.index + channel->eventq.entries - 1);
1091 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1092 EFX_POPULATE_OWORD_3(reg,
1093 FRF_CZ_TIMER_Q_EN, 1,
1094 FRF_CZ_HOST_NOTIFY_MODE, 0,
1095 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1096 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1099 /* Pin event queue buffer */
1100 efx_init_special_buffer(efx, &channel->eventq);
1102 /* Fill event queue with all ones (i.e. empty events) */
1103 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1105 /* Push event queue to card */
1106 EFX_POPULATE_OWORD_3(reg,
1107 FRF_AZ_EVQ_EN, 1,
1108 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1109 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1110 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1111 channel->channel);
1113 efx->type->push_irq_moderation(channel);
1116 void efx_nic_fini_eventq(struct efx_channel *channel)
1118 efx_oword_t reg;
1119 struct efx_nic *efx = channel->efx;
1121 /* Remove event queue from card */
1122 EFX_ZERO_OWORD(reg);
1123 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1124 channel->channel);
1125 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1126 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1128 /* Unpin event queue */
1129 efx_fini_special_buffer(efx, &channel->eventq);
1132 /* Free buffers backing event queue */
1133 void efx_nic_remove_eventq(struct efx_channel *channel)
1135 efx_free_special_buffer(channel->efx, &channel->eventq);
1139 void efx_nic_generate_test_event(struct efx_channel *channel)
1141 unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
1142 efx_qword_t test_event;
1144 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1145 FSE_AZ_EV_CODE_DRV_GEN_EV,
1146 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1147 efx_generate_event(channel, &test_event);
1150 void efx_nic_generate_fill_event(struct efx_channel *channel)
1152 unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
1153 efx_qword_t test_event;
1155 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1156 FSE_AZ_EV_CODE_DRV_GEN_EV,
1157 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1158 efx_generate_event(channel, &test_event);
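/* Sketch of how a self-test might exercise the generator (hypothetical
 * helper; the real logic lives in selftest.c): */
static bool example_eventq_responds(struct efx_channel *channel)
{
	unsigned long timeout = jiffies + HZ / 10;

	efx_nic_generate_test_event(channel);
	while (time_before(jiffies, timeout)) {
		if (efx_nic_event_present(channel))
			return true;
		msleep(1);
	}
	return false;
}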
1161 /**************************************************************************
1163 * Flush handling
1165 **************************************************************************/
1168 static void efx_poll_flush_events(struct efx_nic *efx)
1170 struct efx_channel *channel = efx_get_channel(efx, 0);
1171 struct efx_tx_queue *tx_queue;
1172 struct efx_rx_queue *rx_queue;
1173 unsigned int read_ptr = channel->eventq_read_ptr;
1174 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1176 do {
1177 efx_qword_t *event = efx_event(channel, read_ptr);
1178 int ev_code, ev_sub_code, ev_queue;
1179 bool ev_failed;
1181 if (!efx_event_present(event))
1182 break;
1184 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1185 ev_sub_code = EFX_QWORD_FIELD(*event,
1186 FSF_AZ_DRIVER_EV_SUBCODE);
1187 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1188 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1189 ev_queue = EFX_QWORD_FIELD(*event,
1190 FSF_AZ_DRIVER_EV_SUBDATA);
1191 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1192 tx_queue = efx_get_tx_queue(
1193 efx, ev_queue / EFX_TXQ_TYPES,
1194 ev_queue % EFX_TXQ_TYPES);
1195 tx_queue->flushed = FLUSH_DONE;
1197 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1198 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1199 ev_queue = EFX_QWORD_FIELD(
1200 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1201 ev_failed = EFX_QWORD_FIELD(
1202 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1203 if (ev_queue < efx->n_rx_channels) {
1204 rx_queue = efx_get_rx_queue(efx, ev_queue);
1205 rx_queue->flushed =
1206 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1210 /* We're about to destroy the queue anyway, so
1211 * it's ok to throw away every non-flush event */
1212 EFX_SET_QWORD(*event);
1214 ++read_ptr;
1215 } while (read_ptr != end_ptr);
1217 channel->eventq_read_ptr = read_ptr;
1220 /* Handle tx and rx flushes at the same time, since they run in
1221 * parallel in the hardware and there's no reason for us to
1222 * serialise them */
1223 int efx_nic_flush_queues(struct efx_nic *efx)
1225 struct efx_channel *channel;
1226 struct efx_rx_queue *rx_queue;
1227 struct efx_tx_queue *tx_queue;
1228 int i, tx_pending, rx_pending;
1230 /* If necessary prepare the hardware for flushing */
1231 efx->type->prepare_flush(efx);
1233 /* Flush all tx queues in parallel */
1234 efx_for_each_channel(channel, efx) {
1235 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1236 if (tx_queue->initialised)
1237 efx_flush_tx_queue(tx_queue);
1241 /* The hardware supports four concurrent rx flushes, each of which may
1242 * need to be retried if there is an outstanding descriptor fetch */
1243 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1244 rx_pending = tx_pending = 0;
1245 efx_for_each_channel(channel, efx) {
1246 efx_for_each_channel_rx_queue(rx_queue, channel) {
1247 if (rx_queue->flushed == FLUSH_PENDING)
1248 ++rx_pending;
1251 efx_for_each_channel(channel, efx) {
1252 efx_for_each_channel_rx_queue(rx_queue, channel) {
1253 if (rx_pending == EFX_RX_FLUSH_COUNT)
1254 break;
1255 if (rx_queue->flushed == FLUSH_FAILED ||
1256 rx_queue->flushed == FLUSH_NONE) {
1257 efx_flush_rx_queue(rx_queue);
1258 ++rx_pending;
1261 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1262 if (tx_queue->initialised &&
1263 tx_queue->flushed != FLUSH_DONE)
1264 ++tx_pending;
1268 if (rx_pending == 0 && tx_pending == 0)
1269 return 0;
1271 msleep(EFX_FLUSH_INTERVAL);
1272 efx_poll_flush_events(efx);
1275 /* Mark the queues as all flushed. We're going to return failure
1276 * leading to a reset, or fake up success anyway */
1277 efx_for_each_channel(channel, efx) {
1278 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1279 if (tx_queue->initialised &&
1280 tx_queue->flushed != FLUSH_DONE)
1281 netif_err(efx, hw, efx->net_dev,
1282 "tx queue %d flush command timed out\n",
1283 tx_queue->queue);
1284 tx_queue->flushed = FLUSH_DONE;
1286 efx_for_each_channel_rx_queue(rx_queue, channel) {
1287 if (rx_queue->flushed != FLUSH_DONE)
1288 netif_err(efx, hw, efx->net_dev,
1289 "rx queue %d flush command timed out\n",
1290 efx_rx_queue_index(rx_queue));
1291 rx_queue->flushed = FLUSH_DONE;
1295 return -ETIMEDOUT;
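/* For reference, a sketch of the teardown ordering this function
 * supports (names as used elsewhere in this file):
 *
 *	efx_nic_flush_queues(efx);	// wait for in-flight DMA to stop
 *	efx_nic_fini_tx(tx_queue);	// then unpin the rings
 *	efx_nic_fini_rx(rx_queue);
 */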
1298 /**************************************************************************
1300 * Hardware interrupts
1301 * The hardware interrupt handler does very little work; all the event
1302 * queue processing is carried out by per-channel tasklets.
1304 **************************************************************************/
1306 /* Enable/disable/generate interrupts */
1307 static inline void efx_nic_interrupts(struct efx_nic *efx,
1308 bool enabled, bool force)
1310 efx_oword_t int_en_reg_ker;
1312 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1313 FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1314 FRF_AZ_KER_INT_KER, force,
1315 FRF_AZ_DRV_INT_EN_KER, enabled);
1316 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1319 void efx_nic_enable_interrupts(struct efx_nic *efx)
1321 struct efx_channel *channel;
1323 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1324 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1326 /* Enable interrupts */
1327 efx_nic_interrupts(efx, true, false);
1329 /* Force processing of all the channels to get the EVQ RPTRs up to
1330 date */
1331 efx_for_each_channel(channel, efx)
1332 efx_schedule_channel(channel);
1335 void efx_nic_disable_interrupts(struct efx_nic *efx)
1337 /* Disable interrupts */
1338 efx_nic_interrupts(efx, false, false);
1341 /* Generate a test interrupt
1342 * Interrupt must already have been enabled, otherwise nasty things
1343 * may happen.
1345 void efx_nic_generate_interrupt(struct efx_nic *efx)
1347 efx_nic_interrupts(efx, true, true);
1350 /* Process a fatal interrupt
1351 * Disable bus mastering ASAP and schedule a reset
1353 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1355 struct falcon_nic_data *nic_data = efx->nic_data;
1356 efx_oword_t *int_ker = efx->irq_status.addr;
1357 efx_oword_t fatal_intr;
1358 int error, mem_perr;
1360 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1361 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1363 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1364 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1365 EFX_OWORD_VAL(fatal_intr),
1366 error ? "disabling bus mastering" : "no recognised error");
1368 /* If this is a memory parity error dump which blocks are offending */
1369 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1370 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1371 if (mem_perr) {
1372 efx_oword_t reg;
1373 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1374 netif_err(efx, hw, efx->net_dev,
1375 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1376 EFX_OWORD_VAL(reg));
1379 /* Disable both devices */
1380 pci_clear_master(efx->pci_dev);
1381 if (efx_nic_is_dual_func(efx))
1382 pci_clear_master(nic_data->pci_dev2);
1383 efx_nic_disable_interrupts(efx);
1385 /* Count errors and reset or disable the NIC accordingly */
1386 if (efx->int_error_count == 0 ||
1387 time_after(jiffies, efx->int_error_expire)) {
1388 efx->int_error_count = 0;
1389 efx->int_error_expire =
1390 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1392 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1393 netif_err(efx, hw, efx->net_dev,
1394 "SYSTEM ERROR - reset scheduled\n");
1395 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1396 } else {
1397 netif_err(efx, hw, efx->net_dev,
1398 "SYSTEM ERROR - max number of errors seen."
1399 "NIC will be disabled\n");
1400 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1403 return IRQ_HANDLED;
1406 /* Handle a legacy interrupt
1407 * Acknowledges the interrupt and schedules event queue processing.
1409 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1411 struct efx_nic *efx = dev_id;
1412 efx_oword_t *int_ker = efx->irq_status.addr;
1413 irqreturn_t result = IRQ_NONE;
1414 struct efx_channel *channel;
1415 efx_dword_t reg;
1416 u32 queues;
1417 int syserr;
1419 /* Could this be ours? If interrupts are disabled then the
1420 * channel state may not be valid.
1422 if (!efx->legacy_irq_enabled)
1423 return result;
1425 /* Read the ISR which also ACKs the interrupts */
1426 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1427 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1429 /* Check to see if we have a serious error condition */
1430 if (queues & (1U << efx->fatal_irq_level)) {
1431 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1432 if (unlikely(syserr))
1433 return efx_nic_fatal_interrupt(efx);
1436 if (queues != 0) {
1437 if (EFX_WORKAROUND_15783(efx))
1438 efx->irq_zero_count = 0;
1440 /* Schedule processing of any interrupting queues */
1441 efx_for_each_channel(channel, efx) {
1442 if (queues & 1)
1443 efx_schedule_channel(channel);
1444 queues >>= 1;
1446 result = IRQ_HANDLED;
1448 } else if (EFX_WORKAROUND_15783(efx)) {
1449 efx_qword_t *event;
1451 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1452 * because this might be a shared interrupt. */
1453 if (efx->irq_zero_count++ == 0)
1454 result = IRQ_HANDLED;
1456 /* Ensure we schedule or rearm all event queues */
1457 efx_for_each_channel(channel, efx) {
1458 event = efx_event(channel, channel->eventq_read_ptr);
1459 if (efx_event_present(event))
1460 efx_schedule_channel(channel);
1461 else
1462 efx_nic_eventq_read_ack(channel);
1466 if (result == IRQ_HANDLED) {
1467 efx->last_irq_cpu = raw_smp_processor_id();
1468 netif_vdbg(efx, intr, efx->net_dev,
1469 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1470 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1473 return result;
1476 /* Handle an MSI interrupt
1478 * Handle an MSI hardware interrupt. This routine schedules event
1479 * queue processing. No interrupt acknowledgement cycle is necessary.
1480 * Also, we never need to check that the interrupt is for us, since
1481 * MSI interrupts cannot be shared.
1483 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1485 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1486 struct efx_nic *efx = channel->efx;
1487 efx_oword_t *int_ker = efx->irq_status.addr;
1488 int syserr;
1490 efx->last_irq_cpu = raw_smp_processor_id();
1491 netif_vdbg(efx, intr, efx->net_dev,
1492 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1493 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1495 /* Check to see if we have a serious error condition */
1496 if (channel->channel == efx->fatal_irq_level) {
1497 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1498 if (unlikely(syserr))
1499 return efx_nic_fatal_interrupt(efx);
1502 /* Schedule processing of the channel */
1503 efx_schedule_channel(channel);
1505 return IRQ_HANDLED;
1509 /* Setup RSS indirection table.
1510 * This maps from the hash value of the packet to RXQ
1512 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1514 size_t i = 0;
1515 efx_dword_t dword;
1517 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1518 return;
1520 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1521 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1523 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1524 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1525 efx->rx_indir_table[i]);
1526 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
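/* Sketch (hypothetical helper): the table is typically filled with an
 * even spread over the RX channels before being pushed, e.g.: */
static void example_spread_rx_indir_table(struct efx_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;
	efx_nic_push_rx_indir_table(efx);
}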
1530 /* Hook interrupt handler(s)
1531 * Try MSI and then legacy interrupts.
1533 int efx_nic_init_interrupt(struct efx_nic *efx)
1535 struct efx_channel *channel;
1536 int rc;
1538 if (!EFX_INT_MODE_USE_MSI(efx)) {
1539 irq_handler_t handler;
1540 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1541 handler = efx_legacy_interrupt;
1542 else
1543 handler = falcon_legacy_interrupt_a1;
1545 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1546 efx->name, efx);
1547 if (rc) {
1548 netif_err(efx, drv, efx->net_dev,
1549 "failed to hook legacy IRQ %d\n",
1550 efx->pci_dev->irq);
1551 goto fail1;
1553 return 0;
1556 /* Hook MSI or MSI-X interrupt */
1557 efx_for_each_channel(channel, efx) {
1558 rc = request_irq(channel->irq, efx_msi_interrupt,
1559 IRQF_PROBE_SHARED, /* Not shared */
1560 efx->channel_name[channel->channel],
1561 &efx->channel[channel->channel]);
1562 if (rc) {
1563 netif_err(efx, drv, efx->net_dev,
1564 "failed to hook IRQ %d\n", channel->irq);
1565 goto fail2;
1569 return 0;
1571 fail2:
1572 efx_for_each_channel(channel, efx)
1573 free_irq(channel->irq, &efx->channel[channel->channel]);
1574 fail1:
1575 return rc;
1578 void efx_nic_fini_interrupt(struct efx_nic *efx)
1580 struct efx_channel *channel;
1581 efx_oword_t reg;
1583 /* Disable MSI/MSI-X interrupts */
1584 efx_for_each_channel(channel, efx) {
1585 if (channel->irq)
1586 free_irq(channel->irq, &efx->channel[channel->channel]);
1589 /* ACK legacy interrupt */
1590 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1591 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1592 else
1593 falcon_irq_ack_a1(efx);
1595 /* Disable legacy interrupt */
1596 if (efx->legacy_irq)
1597 free_irq(efx->legacy_irq, efx);
1600 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1602 efx_oword_t altera_build;
1603 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1604 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1607 void efx_nic_init_common(struct efx_nic *efx)
1609 efx_oword_t temp;
1611 /* Set positions of descriptor caches in SRAM. */
1612 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
1613 efx->type->tx_dc_base / 8);
1614 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1615 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
1616 efx->type->rx_dc_base / 8);
1617 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1619 /* Set TX descriptor cache size. */
1620 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1621 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1622 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1624 /* Set RX descriptor cache size. Set low watermark to size-8, as
1625 * this allows most efficient prefetching.
1627 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1628 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1629 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1630 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1631 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1633 /* Program INT_KER address */
1634 EFX_POPULATE_OWORD_2(temp,
1635 FRF_AZ_NORM_INT_VEC_DIS_KER,
1636 EFX_INT_MODE_USE_MSI(efx),
1637 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1638 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1640 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1641 /* Use an interrupt level unused by event queues */
1642 efx->fatal_irq_level = 0x1f;
1643 else
1644 /* Use a valid MSI-X vector */
1645 efx->fatal_irq_level = 0;
1647 /* Enable all the genuinely fatal interrupts. (They are still
1648 * masked by the overall interrupt mask, controlled by
1649 * efx_nic_interrupts()).
1651 * Note: All other fatal interrupts are enabled
1653 EFX_POPULATE_OWORD_3(temp,
1654 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1655 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1656 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1657 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1658 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1659 EFX_INVERT_OWORD(temp);
1660 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1662 efx_nic_push_rx_indir_table(efx);
1664 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1665 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1667 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1668 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1669 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1671 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1673 /* Enable SW_EV to inherit in char driver - assume harmless here */
1674 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1675 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1676 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1677 /* Disable hardware watchdog which can misfire */
1678 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1679 /* Squash TX of packets of 16 bytes or less */
1680 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1681 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1682 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1684 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1685 EFX_POPULATE_OWORD_4(temp,
1686 /* Default values */
1687 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1688 FRF_BZ_TX_PACE_SB_AF, 0xb,
1689 FRF_BZ_TX_PACE_FB_BASE, 0,
1690 /* Allow large pace values in the
1691 * fast bin. */
1692 FRF_BZ_TX_PACE_BIN_TH,
1693 FFE_BZ_TX_PACE_RESERVED);
1694 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1698 /* Register dump */
1700 #define REGISTER_REVISION_A 1
1701 #define REGISTER_REVISION_B 2
1702 #define REGISTER_REVISION_C 3
1703 #define REGISTER_REVISION_Z 3 /* latest revision */
1705 struct efx_nic_reg {
1706 u32 offset:24;
1707 u32 min_revision:2, max_revision:2;
1710 #define REGISTER(name, min_rev, max_rev) { \
1711 FR_ ## min_rev ## max_rev ## _ ## name, \
1712 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1714 #define REGISTER_AA(name) REGISTER(name, A, A)
1715 #define REGISTER_AB(name) REGISTER(name, A, B)
1716 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1717 #define REGISTER_BB(name) REGISTER(name, B, B)
1718 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1719 #define REGISTER_CZ(name) REGISTER(name, C, Z)
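/* Illustrative expansion: REGISTER_BZ(RX_CFG) becomes
 *	{ FR_BZ_RX_CFG, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 * i.e. RX_CFG is dumped on revision B silicon and everything later. */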
1721 static const struct efx_nic_reg efx_nic_regs[] = {
1722 REGISTER_AZ(ADR_REGION),
1723 REGISTER_AZ(INT_EN_KER),
1724 REGISTER_BZ(INT_EN_CHAR),
1725 REGISTER_AZ(INT_ADR_KER),
1726 REGISTER_BZ(INT_ADR_CHAR),
1727 /* INT_ACK_KER is WO */
1728 /* INT_ISR0 is RC */
1729 REGISTER_AZ(HW_INIT),
1730 REGISTER_CZ(USR_EV_CFG),
1731 REGISTER_AB(EE_SPI_HCMD),
1732 REGISTER_AB(EE_SPI_HADR),
1733 REGISTER_AB(EE_SPI_HDATA),
1734 REGISTER_AB(EE_BASE_PAGE),
1735 REGISTER_AB(EE_VPD_CFG0),
1736 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1737 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1738 /* PCIE_CORE_INDIRECT is indirect */
1739 REGISTER_AB(NIC_STAT),
1740 REGISTER_AB(GPIO_CTL),
1741 REGISTER_AB(GLB_CTL),
1742 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1743 REGISTER_BZ(DP_CTRL),
1744 REGISTER_AZ(MEM_STAT),
1745 REGISTER_AZ(CS_DEBUG),
1746 REGISTER_AZ(ALTERA_BUILD),
1747 REGISTER_AZ(CSR_SPARE),
1748 REGISTER_AB(PCIE_SD_CTL0123),
1749 REGISTER_AB(PCIE_SD_CTL45),
1750 REGISTER_AB(PCIE_PCS_CTL_STAT),
1751 /* DEBUG_DATA_OUT is not used */
1752 /* DRV_EV is WO */
1753 REGISTER_AZ(EVQ_CTL),
1754 REGISTER_AZ(EVQ_CNT1),
1755 REGISTER_AZ(EVQ_CNT2),
1756 REGISTER_AZ(BUF_TBL_CFG),
1757 REGISTER_AZ(SRM_RX_DC_CFG),
1758 REGISTER_AZ(SRM_TX_DC_CFG),
1759 REGISTER_AZ(SRM_CFG),
1760 /* BUF_TBL_UPD is WO */
1761 REGISTER_AZ(SRM_UPD_EVQ),
1762 REGISTER_AZ(SRAM_PARITY),
1763 REGISTER_AZ(RX_CFG),
1764 REGISTER_BZ(RX_FILTER_CTL),
1765 /* RX_FLUSH_DESCQ is WO */
1766 REGISTER_AZ(RX_DC_CFG),
1767 REGISTER_AZ(RX_DC_PF_WM),
1768 REGISTER_BZ(RX_RSS_TKEY),
1769 /* RX_NODESC_DROP is RC */
1770 REGISTER_AA(RX_SELF_RST),
1771 /* RX_DEBUG, RX_PUSH_DROP are not used */
1772 REGISTER_CZ(RX_RSS_IPV6_REG1),
1773 REGISTER_CZ(RX_RSS_IPV6_REG2),
1774 REGISTER_CZ(RX_RSS_IPV6_REG3),
1775 /* TX_FLUSH_DESCQ is WO */
1776 REGISTER_AZ(TX_DC_CFG),
1777 REGISTER_AA(TX_CHKSM_CFG),
1778 REGISTER_AZ(TX_CFG),
1779 /* TX_PUSH_DROP is not used */
1780 REGISTER_AZ(TX_RESERVED),
1781 REGISTER_BZ(TX_PACE),
1782 /* TX_PACE_DROP_QID is RC */
1783 REGISTER_BB(TX_VLAN),
1784 REGISTER_BZ(TX_IPFIL_PORTEN),
1785 REGISTER_AB(MD_TXD),
1786 REGISTER_AB(MD_RXD),
1787 REGISTER_AB(MD_CS),
1788 REGISTER_AB(MD_PHY_ADR),
1789 REGISTER_AB(MD_ID),
1790 /* MD_STAT is RC */
1791 REGISTER_AB(MAC_STAT_DMA),
1792 REGISTER_AB(MAC_CTRL),
1793 REGISTER_BB(GEN_MODE),
1794 REGISTER_AB(MAC_MC_HASH_REG0),
1795 REGISTER_AB(MAC_MC_HASH_REG1),
1796 REGISTER_AB(GM_CFG1),
1797 REGISTER_AB(GM_CFG2),
1798 /* GM_IPG and GM_HD are not used */
1799 REGISTER_AB(GM_MAX_FLEN),
1800 /* GM_TEST is not used */
1801 REGISTER_AB(GM_ADR1),
1802 REGISTER_AB(GM_ADR2),
1803 REGISTER_AB(GMF_CFG0),
1804 REGISTER_AB(GMF_CFG1),
1805 REGISTER_AB(GMF_CFG2),
1806 REGISTER_AB(GMF_CFG3),
1807 REGISTER_AB(GMF_CFG4),
1808 REGISTER_AB(GMF_CFG5),
1809 REGISTER_BB(TX_SRC_MAC_CTL),
1810 REGISTER_AB(XM_ADR_LO),
1811 REGISTER_AB(XM_ADR_HI),
1812 REGISTER_AB(XM_GLB_CFG),
1813 REGISTER_AB(XM_TX_CFG),
1814 REGISTER_AB(XM_RX_CFG),
1815 REGISTER_AB(XM_MGT_INT_MASK),
1816 REGISTER_AB(XM_FC),
1817 REGISTER_AB(XM_PAUSE_TIME),
1818 REGISTER_AB(XM_TX_PARAM),
1819 REGISTER_AB(XM_RX_PARAM),
1820 /* XM_MGT_INT_MSK (note no 'A') is RC */
1821 REGISTER_AB(XX_PWR_RST),
1822 REGISTER_AB(XX_SD_CTL),
1823 REGISTER_AB(XX_TXDRV_CTL),
1824 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1825 /* XX_CORE_STAT is partly RC */
1828 struct efx_nic_reg_table {
1829 u32 offset:24;
1830 u32 min_revision:2, max_revision:2;
1831 u32 step:6, rows:21;
1834 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1835 offset, \
1836 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1837 step, rows \
1839 #define REGISTER_TABLE(name, min_rev, max_rev) \
1840 REGISTER_TABLE_DIMENSIONS( \
1841 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1842 min_rev, max_rev, \
1843 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1844 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1845 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1846 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1847 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1848 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1849 #define REGISTER_TABLE_BB_CZ(name) \
1850 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1851 FR_BZ_ ## name ## _STEP, \
1852 FR_BB_ ## name ## _ROWS), \
1853 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1854 FR_BZ_ ## name ## _STEP, \
1855 FR_CZ_ ## name ## _ROWS)
1856 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
1858 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1859 /* DRIVER is not used */
1860 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1861 REGISTER_TABLE_BB(TX_IPFIL_TBL),
1862 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1863 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1864 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1865 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1866 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1867 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1868 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1869 /* We can't reasonably read all of the buffer table (up to 8MB!).
1870 * However this driver will only use a few entries. Reading
1871 * 1K entries allows for some expansion of queue count and
1872 * size before we need to change the version. */
1873 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1874 A, A, 8, 1024),
1875 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1876 B, Z, 8, 1024),
1877 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1878 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1879 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1880 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1881 /* TX_FILTER_TBL0 is huge and not used by this driver */
1882 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1883 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1884 /* MSIX_PBA_TABLE is not mapped */
1885 /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
1886 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1889 size_t efx_nic_get_regs_len(struct efx_nic *efx)
1891 const struct efx_nic_reg *reg;
1892 const struct efx_nic_reg_table *table;
1893 size_t len = 0;
1895 for (reg = efx_nic_regs;
1896 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1897 reg++)
1898 if (efx->type->revision >= reg->min_revision &&
1899 efx->type->revision <= reg->max_revision)
1900 len += sizeof(efx_oword_t);
1902 for (table = efx_nic_reg_tables;
1903 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1904 table++)
1905 if (efx->type->revision >= table->min_revision &&
1906 efx->type->revision <= table->max_revision)
1907 len += table->rows * min_t(size_t, table->step, 16);
1909 return len;
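/* Sketch of how an ethtool-style caller might pair the two dump
 * functions (hypothetical wrapper; needs <linux/slab.h> for kzalloc): */
static void *example_dump_registers(struct efx_nic *efx, size_t *len)
{
	void *buf;

	*len = efx_nic_get_regs_len(efx);
	buf = kzalloc(*len, GFP_KERNEL);
	if (buf)
		efx_nic_get_regs(efx, buf);
	return buf;
}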
1912 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1914 const struct efx_nic_reg *reg;
1915 const struct efx_nic_reg_table *table;
1917 for (reg = efx_nic_regs;
1918 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1919 reg++) {
1920 if (efx->type->revision >= reg->min_revision &&
1921 efx->type->revision <= reg->max_revision) {
1922 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1923 buf += sizeof(efx_oword_t);
1927 for (table = efx_nic_reg_tables;
1928 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1929 table++) {
1930 size_t size, i;
1932 if (!(efx->type->revision >= table->min_revision &&
1933 efx->type->revision <= table->max_revision))
1934 continue;
1936 size = min_t(size_t, table->step, 16);
1938 for (i = 0; i < table->rows; i++) {
1939 switch (table->step) {
1940 case 4: /* 32-bit register or SRAM */
1941 efx_readd_table(efx, buf, table->offset, i);
1942 break;
1943 case 8: /* 64-bit SRAM */
1944 efx_sram_readq(efx,
1945 efx->membase + table->offset,
1946 buf, i);
1947 break;
1948 case 16: /* 128-bit register */
1949 efx_reado_table(efx, buf, table->offset, i);
1950 break;
1951 case 32: /* 128-bit register, interleaved */
1952 efx_reado_table(efx, buf, table->offset, 2 * i);
1953 break;
1954 default:
1955 WARN_ON(1);
1956 return;
1958 buf += size;