// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "farch_regs.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EF4_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST		0x000101
#define _EF4_CHANNEL_MAGIC_FILL		0x000102
#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);
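
/* Worked example of the encoding above (illustrative only, not extra
 * driver behaviour): a TEST event for channel 2 is built as
 *	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, 2) == (0x000101 << 8) | 2
 *						       == 0x00010102
 * _EF4_CHANNEL_MAGIC_CODE() recovers the 0x000101 code, and the low eight
 * bits carry the channel or queue index that the event refers to.
 */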

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	ef4_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EF4_INVERT_OWORD(imask);

		ef4_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EF4_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EF4_AND_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 1);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EF4_OR_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 0);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		ef4_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 **************************************************************************/
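
/* Life cycle of a special buffer, as used by the TX, RX and event-queue
 * code below (a summary of this file's own helpers, not new behaviour):
 *
 *	ef4_alloc_special_buffer()	- allocate DMA memory and buffer IDs
 *	ef4_init_special_buffer()	- program the buffer table ("pin")
 *	ef4_fini_special_buffer()	- clear the buffer table ("unpin")
 *	ef4_free_special_buffer()	- release the DMA memory
 */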

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int i;
	dma_addr_t dma_addr;
	int index;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EF4_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
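/* Sizing example (illustrative, assuming EF4_BUF_SIZE is the 4KB unit used
 * here): a 512-entry TX ring of 8-byte ef4_qword_t descriptors needs 4096
 * bytes and therefore one buffer table entry, while a 4096-entry event
 * queue of 8-byte events needs 32KB and eight consecutive buffer IDs
 * starting at buffer->index.
 */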
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EF4_BUF_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EF4_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	ef4_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{
	unsigned write_ptr;
	ef4_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{
	unsigned write_ptr;
	ef4_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	ef4_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;
	ef4_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef4_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
		EF4_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EF4_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		ef4_farch_push_tx_desc(tx_queue, txd);
	} else {
		ef4_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));

	return len;
}
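
/* Boundary example (illustrative values): with EF4_PAGE_SIZE == 4096 and
 * dma_addr == 0x12340ff8, limit == (~0xff8 & 0xfff) + 1 == 8, so a request
 * for len == 2048 is clipped to 8 bytes and the caller has to emit another
 * descriptor starting at the next 4K boundary.
 */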

/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t reg;

	/* Pin TX descriptor ring */
	ef4_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EF4_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);

		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EF4_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EF4_ZERO_OWORD(tx_desc_ptr);
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	ef4_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_rx_buffer *rx_buf;
	ef4_qword_t *rxd;

	rxd = ef4_rx_desc(rx_queue, index);
	rx_buf = ef4_rx_buffer(rx_queue, index);
	EF4_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			ef4_rx_queue_index(rx_queue));
}

int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;
	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	ef4_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EF4_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      ef4_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      ef4_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
}

static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_oword_t rx_flush_descq;

	EF4_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     ef4_rx_queue_index(rx_queue));
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EF4_ZERO_OWORD(rx_desc_ptr);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{
	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{
	bool i = true;
	ef4_oword_t txd_ptr_tbl;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				ef4_farch_magic_event(channel,
						      EF4_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
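/* In outline (a summary of the code below, not additional behaviour):
 * every TX queue is given a flush request immediately, while RX flushes
 * are issued at most EF4_RX_FLUSH_COUNT at a time; FLS_DONE driver events
 * and the DRAIN magic events then decrement efx->active_queues, and
 * ef4_farch_do_flush() sleeps on efx->flush_wq until
 * ef4_farch_flush_wake() reports completion or room for more RX flushes.
 */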
static int ef4_farch_do_flush(struct ef4_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_farch_flush_tx_queue(tx_queue);
		}
		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EF4_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					ef4_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !ef4_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = ef4_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel)
				ef4_farch_rx_fini(rx_queue);
			ef4_for_each_channel_tx_queue(tx_queue, channel)
				ef4_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through ef4_check_tx_flush_complete())
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously.  Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
	ef4_dword_t reg;
	struct ef4_nic *efx = channel->efx;

	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	ef4_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{
	ef4_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{
	ef4_qword_t event;

	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
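/* Batching example (illustrative numbers): if tx_queue->read_count is 10
 * and the event's FSF_AZ_TX_EV_DESC_PTR field is 14, the handler below
 * computes tx_packets = (14 - 10) & ptr_mask = 4 and hands index 14 to
 * ef4_xmit_done(), which completes the pending buffers up to that index.
 */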
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct ef4_tx_queue *tx_queue;
	struct ef4_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EF4_QWORD_FMT"\n", channel->channel,
			  EF4_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EF4_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct ef4_rx_queue *rx_queue;
	struct ef4_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = ef4_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EF4_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;

	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EF4_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EF4_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EF4_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	ef4_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct ef4_tx_queue, then
 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_tx_queue *tx_queue;
	int qid;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
					    qid % EF4_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			ef4_farch_magic_event(tx_queue->channel,
					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = ef4_get_channel(efx, qid);
	if (!ef4_channel_has_rx_queue(channel))
		return;
	rx_queue = ef4_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
ef4_farch_handle_drain_event(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
					     ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_queue *rx_queue =
		ef4_channel_has_rx_queue(channel) ?
		ef4_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EF4_CHANNEL_MAGIC_CODE(magic);

	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so ef4_process_channel() won't refill the
		 * queue. Refill it here */
		ef4_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		ef4_farch_handle_drain_event(channel);
	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
		ef4_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EF4_QWORD_FMT"\n",
			  channel->channel, EF4_QWORD_VAL(*event));
	}
}

static void
ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx,
				   EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int read_ptr;
	ef4_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = ef4_event(channel, read_ptr);
		event = *p_event;

		if (!ef4_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EF4_QWORD_FMT"\n",
			   channel->channel, EF4_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EF4_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			ef4_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += ef4_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			ef4_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			ef4_farch_handle_driver_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EF4_QWORD_FMT ")\n", channel->channel,
				  ev_code, EF4_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int ef4_farch_ev_probe(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return ef4_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(ef4_qword_t));
}

int ef4_farch_ev_init(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	ef4_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void ef4_farch_ev_fini(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	/* Remove event queue from card */
	EF4_ZERO_OWORD(reg);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	ef4_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void ef4_farch_ev_remove(struct ef4_channel *channel)
{
	ef4_free_special_buffer(channel->efx, &channel->eventq);
}

void ef4_farch_ev_test_generate(struct ef4_channel *channel)
{
	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
}

void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
{
	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
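
/* Interrupt-to-event flow implemented below (a summary of this file; the
 * actual per-channel scheduling lives in the core driver): the legacy or
 * MSI handler reads/acks the interrupt, checks for fatal errors, and calls
 * ef4_schedule_channel_irq() for each interrupting channel; the channel's
 * event-processing context then runs ef4_farch_ev_process() and finally
 * re-arms the event queue with ef4_farch_ev_read_ack().
 */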

/* Enable/disable/generate interrupts */
static inline void ef4_farch_interrupts(struct ef4_nic *efx,
					bool enabled, bool force)
{
	ef4_oword_t int_en_reg_ker;

	EF4_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void ef4_farch_irq_enable_master(struct ef4_nic *efx)
{
	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	ef4_farch_interrupts(efx, true, false);
}

void ef4_farch_irq_disable_master(struct ef4_nic *efx)
{
	/* Disable interrupts */
	ef4_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int ef4_farch_irq_test_generate(struct ef4_nic *efx)
{
	ef4_farch_interrupts(efx, true, true);
	return 0;
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	ef4_oword_t fatal_intr;
	int error, mem_perr;

	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
		  EF4_OWORD_FMT": %s\n", EF4_OWORD_VAL(*int_ker),
		  EF4_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		ef4_oword_t reg;
		ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
			  EF4_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (ef4_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	ef4_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EF4_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	ef4_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct ef4_channel *channel;
	ef4_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EF4_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				if (queues & 1)
					ef4_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		ef4_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				event = ef4_event(channel,
						  channel->eventq_read_ptr);
				if (ef4_event_present(event))
					ef4_schedule_channel_irq(channel);
				else
					ef4_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
{
	struct ef4_msi_context *context = dev_id;
	struct ef4_nic *efx = context->efx;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	ef4_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
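/* The table pushed below has FR_BZ_RX_INDIRECTION_TBL_ROWS entries, each
 * naming an RX queue, and the NIC picks a row using bits of the packet's
 * RSS hash.  Filling efx->rx_indir_table (the default spread across the RX
 * channels, or an ethtool override) happens elsewhere in the driver; this
 * function only writes the table to the hardware.
 */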
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
{
	size_t i = 0;
	ef4_dword_t dword;

	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		ef4_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
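/* Worked example with made-up numbers: if sram_lim_qw were 0x1000 and
 * vi_count came out as 8, then tx_dc_base = 0x1000 - 8 * TX_DC_ENTRIES(16)
 * = 0xf80 and rx_dc_base = 0xf80 - 8 * RX_DC_ENTRIES(64) = 0xd80, leaving
 * everything below rx_dc_base for buffer table entries.
 */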
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
		       efx->n_channels * EF4_MAX_EVQ_SIZE)
		      * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
{
	ef4_oword_t altera_build;
	ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void ef4_farch_init_common(struct ef4_nic *efx)
{
	ef4_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EF4_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EF4_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	/* Use a valid MSI-X vector */
	efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EF4_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EF4_INVERT_OWORD(temp);
	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
 * table is full.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1773 enum ef4_farch_filter_type
{
1774 EF4_FARCH_FILTER_TCP_FULL
= 0,
1775 EF4_FARCH_FILTER_TCP_WILD
,
1776 EF4_FARCH_FILTER_UDP_FULL
,
1777 EF4_FARCH_FILTER_UDP_WILD
,
1778 EF4_FARCH_FILTER_MAC_FULL
= 4,
1779 EF4_FARCH_FILTER_MAC_WILD
,
1780 EF4_FARCH_FILTER_UC_DEF
= 8,
1781 EF4_FARCH_FILTER_MC_DEF
,
1782 EF4_FARCH_FILTER_TYPE_COUNT
, /* number of specific types */
1785 enum ef4_farch_filter_table_id
{
1786 EF4_FARCH_FILTER_TABLE_RX_IP
= 0,
1787 EF4_FARCH_FILTER_TABLE_RX_MAC
,
1788 EF4_FARCH_FILTER_TABLE_RX_DEF
,
1789 EF4_FARCH_FILTER_TABLE_TX_MAC
,
1790 EF4_FARCH_FILTER_TABLE_COUNT
,
1793 enum ef4_farch_filter_index
{
1794 EF4_FARCH_FILTER_INDEX_UC_DEF
,
1795 EF4_FARCH_FILTER_INDEX_MC_DEF
,
1796 EF4_FARCH_FILTER_SIZE_RX_DEF
,
1799 struct ef4_farch_filter_spec
{
1807 struct ef4_farch_filter_table
{
1808 enum ef4_farch_filter_table_id id
;
1809 u32 offset
; /* address of table relative to BAR */
1810 unsigned size
; /* number of entries */
1811 unsigned step
; /* step between entries */
1812 unsigned used
; /* number currently used */
1813 unsigned long *used_bitmap
;
1814 struct ef4_farch_filter_spec
*spec
;
1815 unsigned search_limit
[EF4_FARCH_FILTER_TYPE_COUNT
];
1818 struct ef4_farch_filter_state
{
1819 struct ef4_farch_filter_table table
[EF4_FARCH_FILTER_TABLE_COUNT
];
1823 ef4_farch_filter_table_clear_entry(struct ef4_nic
*efx
,
1824 struct ef4_farch_filter_table
*table
,
1825 unsigned int filter_idx
);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 ef4_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 ef4_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}

static enum ef4_farch_filter_table_id
ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
		     (EF4_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
		     (EF4_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
		     EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
}
static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        struct ef4_farch_filter_table *table;
        ef4_oword_t filter_ctl;

        ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

        table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
        EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
                            table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
                            EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
        EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
                            table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
                            EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
        EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
                            table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
                            EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
        EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
                            table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
                            EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

        table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
        if (table->size) {
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
                        table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
                        EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
                        table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
                        EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
        }

        table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
        if (table->size) {
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
                        table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
                        !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
                           EF4_FILTER_FLAG_RX_RSS));
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
                        table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
                        !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
                           EF4_FILTER_FLAG_RX_RSS));

                /* There is a single bit to enable RX scatter for all
                 * unmatched packets.  Only set it if scatter is
                 * enabled in both filter specs.
                 */
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
                        !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
                           table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
                           EF4_FILTER_FLAG_RX_SCATTER));
        } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
                /* We don't expose 'default' filters because unmatched
                 * packets always go to the queue number found in the
                 * RSS table.  But we still need to set the RX scatter
                 * bit here.
                 */
                EF4_SET_OWORD_FIELD(
                        filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
                        efx->rx_scatter);
        }

        ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
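
/* Note that both "push" helpers program the hardware with the tracked
 * per-type search depth plus the CTL_SRCH_FUDGE_* allowance defined
 * above, so the pipelined search in hardware always goes at least as
 * deep as the deepest filter the driver has inserted for that type.
 */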
static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        struct ef4_farch_filter_table *table;
        ef4_oword_t tx_cfg;

        ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

        table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
        if (table->size) {
                EF4_SET_OWORD_FIELD(
                        tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
                        table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
                        EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
                EF4_SET_OWORD_FIELD(
                        tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
                        table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
                        EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
        }

        ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
static int
ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
                               const struct ef4_filter_spec *gen_spec)
{
        bool is_full = false;

        if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
            gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
                return -EINVAL;

        spec->priority = gen_spec->priority;
        spec->flags = gen_spec->flags;
        spec->dmaq_id = gen_spec->dmaq_id;

        switch (gen_spec->match_flags) {
        case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
              EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
              EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
                is_full = true;
                fallthrough;
        case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
              EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
                __be32 rhost, host1, host2;
                __be16 rport, port1, port2;

                EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));

                if (gen_spec->ether_type != htons(ETH_P_IP))
                        return -EPROTONOSUPPORT;
                if (gen_spec->loc_port == 0 ||
                    (is_full && gen_spec->rem_port == 0))
                        return -EADDRNOTAVAIL;
                switch (gen_spec->ip_proto) {
                case IPPROTO_TCP:
                        spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
                                      EF4_FARCH_FILTER_TCP_WILD);
                        break;
                case IPPROTO_UDP:
                        spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
                                      EF4_FARCH_FILTER_UDP_WILD);
                        break;
                default:
                        return -EPROTONOSUPPORT;
                }

                /* Filter is constructed in terms of source and destination,
                 * with the odd wrinkle that the ports are swapped in a UDP
                 * wildcard filter.  We need to convert from local and remote
                 * (= zero for wildcard) addresses.
                 */
                rhost = is_full ? gen_spec->rem_host[0] : 0;
                rport = is_full ? gen_spec->rem_port : 0;
                host1 = rhost;
                host2 = gen_spec->loc_host[0];
                if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
                        port1 = gen_spec->loc_port;
                        port2 = rport;
                } else {
                        port1 = rport;
                        port2 = gen_spec->loc_port;
                }
                spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
                spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
                spec->data[2] = ntohl(host2);

                break;
        }
        case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
                is_full = true;
                fallthrough;
        case EF4_FILTER_MATCH_LOC_MAC:
                spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
                              EF4_FARCH_FILTER_MAC_WILD);
                spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
                spec->data[1] = (gen_spec->loc_mac[2] << 24 |
                                 gen_spec->loc_mac[3] << 16 |
                                 gen_spec->loc_mac[4] << 8 |
                                 gen_spec->loc_mac[5]);
                spec->data[2] = (gen_spec->loc_mac[0] << 8 |
                                 gen_spec->loc_mac[1]);
                break;

        case EF4_FILTER_MATCH_LOC_MAC_IG:
                spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
                              EF4_FARCH_FILTER_MC_DEF :
                              EF4_FARCH_FILTER_UC_DEF);
                memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
                break;

        default:
                return -EPROTONOSUPPORT;
        }

        return 0;
}
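
/* ef4_farch_filter_to_gen_spec() below must stay the exact inverse of
 * ef4_farch_filter_from_gen_spec(): a spec converted to hardware form and
 * back is what the RX NFC code reports to userland.
 */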
static void
ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
                             const struct ef4_farch_filter_spec *spec)
{
        bool is_full = false;

        /* *gen_spec should be completely initialised, to be consistent
         * with ef4_filter_init_{rx,tx}() and in case we want to copy
         * it back to userland.
         */
        memset(gen_spec, 0, sizeof(*gen_spec));

        gen_spec->priority = spec->priority;
        gen_spec->flags = spec->flags;
        gen_spec->dmaq_id = spec->dmaq_id;

        switch (spec->type) {
        case EF4_FARCH_FILTER_TCP_FULL:
        case EF4_FARCH_FILTER_UDP_FULL:
                is_full = true;
                fallthrough;
        case EF4_FARCH_FILTER_TCP_WILD:
        case EF4_FARCH_FILTER_UDP_WILD: {
                __be32 host1, host2;
                __be16 port1, port2;

                gen_spec->match_flags =
                        EF4_FILTER_MATCH_ETHER_TYPE |
                        EF4_FILTER_MATCH_IP_PROTO |
                        EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
                if (is_full)
                        gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
                                                  EF4_FILTER_MATCH_REM_PORT);
                gen_spec->ether_type = htons(ETH_P_IP);
                gen_spec->ip_proto =
                        (spec->type == EF4_FARCH_FILTER_TCP_FULL ||
                         spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
                        IPPROTO_TCP : IPPROTO_UDP;

                host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
                port1 = htons(spec->data[0]);
                host2 = htonl(spec->data[2]);
                port2 = htons(spec->data[1] >> 16);
                if (spec->flags & EF4_FILTER_FLAG_TX) {
                        gen_spec->loc_host[0] = host1;
                        gen_spec->rem_host[0] = host2;
                } else {
                        gen_spec->loc_host[0] = host2;
                        gen_spec->rem_host[0] = host1;
                }
                if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
                    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
                        gen_spec->loc_port = port1;
                        gen_spec->rem_port = port2;
                } else {
                        gen_spec->loc_port = port2;
                        gen_spec->rem_port = port1;
                }

                break;
        }
        case EF4_FARCH_FILTER_MAC_FULL:
                is_full = true;
                fallthrough;
        case EF4_FARCH_FILTER_MAC_WILD:
                gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
                if (is_full)
                        gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
                gen_spec->loc_mac[0] = spec->data[2] >> 8;
                gen_spec->loc_mac[1] = spec->data[2];
                gen_spec->loc_mac[2] = spec->data[1] >> 24;
                gen_spec->loc_mac[3] = spec->data[1] >> 16;
                gen_spec->loc_mac[4] = spec->data[1] >> 8;
                gen_spec->loc_mac[5] = spec->data[1];
                gen_spec->outer_vid = htons(spec->data[0]);
                break;

        case EF4_FARCH_FILTER_UC_DEF:
        case EF4_FARCH_FILTER_MC_DEF:
                gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
                gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
                break;

        default:
                WARN_ON(1);
                break;
        }
}
static void
ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
                              struct ef4_farch_filter_spec *spec)
{
        /* If there's only one channel then disable RSS for non VF
         * traffic, thereby allowing VFs to use RSS when the PF can't.
         */
        spec->priority = EF4_FILTER_PRI_AUTO;
        spec->flags = (EF4_FILTER_FLAG_RX |
                       (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
                       (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
        spec->dmaq_id = 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 ef4_farch_filter_build(ef4_oword_t *filter,
                                  struct ef4_farch_filter_spec *spec)
{
        u32 data3;

        switch (ef4_farch_filter_spec_table_id(spec)) {
        case EF4_FARCH_FILTER_TABLE_RX_IP: {
                bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
                               spec->type == EF4_FARCH_FILTER_UDP_WILD);
                EF4_POPULATE_OWORD_7(
                        *filter,
                        FRF_BZ_RSS_EN,
                        !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
                        FRF_BZ_SCATTER_EN,
                        !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
                        FRF_BZ_TCP_UDP, is_udp,
                        FRF_BZ_RXQ_ID, spec->dmaq_id,
                        EF4_DWORD_2, spec->data[2],
                        EF4_DWORD_1, spec->data[1],
                        EF4_DWORD_0, spec->data[0]);
                data3 = is_udp;
                break;
        }

        case EF4_FARCH_FILTER_TABLE_RX_MAC: {
                bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
                EF4_POPULATE_OWORD_7(
                        *filter,
                        FRF_CZ_RMFT_RSS_EN,
                        !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
                        FRF_CZ_RMFT_SCATTER_EN,
                        !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
                        FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
                        FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
                        FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
                        FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
                        FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
                data3 = is_wild;
                break;
        }

        case EF4_FARCH_FILTER_TABLE_TX_MAC: {
                bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
                EF4_POPULATE_OWORD_5(*filter,
                                     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
                                     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
                                     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
                                     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
                                     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
                data3 = is_wild | spec->dmaq_id << 1;
                break;
        }

        default:
                BUG();
        }

        return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
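
/* The key returned by ef4_farch_filter_build() (the XOR of the data words
 * with the type/queue dependent data3) is what gets fed to
 * ef4_farch_filter_hash() and ef4_farch_filter_increment() when a table
 * slot is being chosen in ef4_farch_filter_insert().
 */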
static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
                                   const struct ef4_farch_filter_spec *right)
{
        if (left->type != right->type ||
            memcmp(left->data, right->data, sizeof(left->data)))
                return false;

        if (left->flags & EF4_FILTER_FLAG_TX &&
            left->dmaq_id != right->dmaq_id)
                return false;

        return true;
}
/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EF4_FARCH_FILTER_MATCH_PRI_COUNT 5

static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
        [EF4_FARCH_FILTER_TCP_FULL]     = 0,
        [EF4_FARCH_FILTER_UDP_FULL]     = 0,
        [EF4_FARCH_FILTER_TCP_WILD]     = 1,
        [EF4_FARCH_FILTER_UDP_WILD]     = 1,
        [EF4_FARCH_FILTER_MAC_FULL]     = 2,
        [EF4_FARCH_FILTER_MAC_WILD]     = 3,
        [EF4_FARCH_FILTER_UC_DEF]       = 4,
        [EF4_FARCH_FILTER_MC_DEF]       = 4,
};

static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
        EF4_FARCH_FILTER_TABLE_RX_IP,   /* RX match pri 0 */
        EF4_FARCH_FILTER_TABLE_RX_IP,
        EF4_FARCH_FILTER_TABLE_RX_MAC,
        EF4_FARCH_FILTER_TABLE_RX_MAC,
        EF4_FARCH_FILTER_TABLE_RX_DEF,  /* RX match pri 4 */
        EF4_FARCH_FILTER_TABLE_TX_MAC,  /* TX match pri 0 */
        EF4_FARCH_FILTER_TABLE_TX_MAC,  /* TX match pri 1 */
};

#define EF4_FARCH_FILTER_INDEX_WIDTH 13
#define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
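
/* An external filter ID therefore has the layout
 *
 *      bits 31..13: match-priority "range" (RX ranges first, then TX)
 *      bits 12..0 : index within the underlying table
 *
 * e.g. a TCP wildcard RX filter at table index 5 becomes
 * (1 << EF4_FARCH_FILTER_INDEX_WIDTH) | 5.
 */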
static inline u32
ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
                         unsigned int index)
{
        unsigned int range;

        range = ef4_farch_filter_type_match_pri[spec->type];
        if (!(spec->flags & EF4_FILTER_FLAG_RX))
                range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;

        return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline enum ef4_farch_filter_table_id
ef4_farch_filter_id_table_id(u32 id)
{
        unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;

        if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
                return ef4_farch_filter_range_table[range];
        else
                return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int ef4_farch_filter_id_index(u32 id)
{
        return id & EF4_FARCH_FILTER_INDEX_MASK;
}
u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
        enum ef4_farch_filter_table_id table_id;

        do {
                table_id = ef4_farch_filter_range_table[range];
                if (state->table[table_id].size != 0)
                        return range << EF4_FARCH_FILTER_INDEX_WIDTH |
                                state->table[table_id].size;
        } while (range--);

        return 0;
}
s32 ef4_farch_filter_insert(struct ef4_nic *efx,
                            struct ef4_filter_spec *gen_spec,
                            bool replace_equal)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        struct ef4_farch_filter_table *table;
        struct ef4_farch_filter_spec spec;
        ef4_oword_t filter;
        int rep_index, ins_index;
        unsigned int depth = 0;
        int rc;

        rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
        if (rc)
                return rc;

        table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
        if (table->size == 0)
                return -EINVAL;

        netif_vdbg(efx, hw, efx->net_dev,
                   "%s: type %d search_limit=%d", __func__, spec.type,
                   table->search_limit[spec.type]);

        if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
                /* One filter spec per type */
                BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
                BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
                             EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
                rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
                ins_index = rep_index;

                spin_lock_bh(&efx->filter_lock);
        } else {
                /* Search concurrently for
                 * (1) a filter to be replaced (rep_index): any filter
                 *     with the same match values, up to the current
                 *     search depth for this type, and
                 * (2) the insertion point (ins_index): (1) or any
                 *     free slot before it or up to the maximum search
                 *     depth for this priority
                 * We fail if we cannot find (2).
                 *
                 * We can stop once either
                 * (a) we find (1), in which case we have definitely
                 *     found (2) as well; or
                 * (b) we have searched exhaustively for (1), and have
                 *     either found (2) or searched exhaustively for it
                 */
                u32 key = ef4_farch_filter_build(&filter, &spec);
                unsigned int hash = ef4_farch_filter_hash(key);
                unsigned int incr = ef4_farch_filter_increment(key);
                unsigned int max_rep_depth = table->search_limit[spec.type];
                unsigned int max_ins_depth =
                        spec.priority <= EF4_FILTER_PRI_HINT ?
                        EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
                        EF4_FARCH_FILTER_CTL_SRCH_MAX;
                unsigned int i = hash & (table->size - 1);

                ins_index = -1;
                depth = 1;

                spin_lock_bh(&efx->filter_lock);

                for (;;) {
                        if (!test_bit(i, table->used_bitmap)) {
                                if (ins_index < 0)
                                        ins_index = i;
                        } else if (ef4_farch_filter_equal(&spec,
                                                          &table->spec[i])) {
                                /* Case (a) */
                                if (ins_index < 0)
                                        ins_index = i;
                                rep_index = i;
                                break;
                        }

                        if (depth >= max_rep_depth &&
                            (ins_index >= 0 || depth >= max_ins_depth)) {
                                /* Case (b) */
                                if (ins_index < 0) {
                                        rc = -EBUSY;
                                        goto out;
                                }
                                rep_index = -1;
                                break;
                        }

                        i = (i + incr) & (table->size - 1);
                        ++depth;
                }
        }
        /* If we found a filter to be replaced, check whether we
         * should do so
         */
        if (rep_index >= 0) {
                struct ef4_farch_filter_spec *saved_spec =
                        &table->spec[rep_index];

                if (spec.priority == saved_spec->priority && !replace_equal) {
                        rc = -EEXIST;
                        goto out;
                }
                if (spec.priority < saved_spec->priority) {
                        rc = -EPERM;
                        goto out;
                }
                if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
                    saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
                        spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
        }

        /* Insert the filter */
        if (ins_index != rep_index) {
                __set_bit(ins_index, table->used_bitmap);
                ++table->used;
        }
        table->spec[ins_index] = spec;

        if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
                ef4_farch_filter_push_rx_config(efx);
        } else {
                if (table->search_limit[spec.type] < depth) {
                        table->search_limit[spec.type] = depth;
                        if (spec.flags & EF4_FILTER_FLAG_TX)
                                ef4_farch_filter_push_tx_limits(efx);
                        else
                                ef4_farch_filter_push_rx_config(efx);
                }

                ef4_writeo(efx, &filter,
                           table->offset + table->step * ins_index);

                /* If we were able to replace a filter by inserting
                 * at a lower depth, clear the replaced filter
                 */
                if (ins_index != rep_index && rep_index >= 0)
                        ef4_farch_filter_table_clear_entry(efx, table,
                                                           rep_index);
        }

        netif_vdbg(efx, hw, efx->net_dev,
                   "%s: filter type %d index %d rxq %u set",
                   __func__, spec.type, ins_index, spec.dmaq_id);
        rc = ef4_farch_filter_make_id(&spec, ins_index);

out:
        spin_unlock_bh(&efx->filter_lock);
        return rc;
}
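
/* The value returned to the caller on success is the external filter ID
 * built by ef4_farch_filter_make_id(), not the raw table index;
 * ef4_farch_filter_remove_safe() and ef4_farch_filter_get_safe() decode
 * it again with ef4_farch_filter_id_table_id()/_id_index().
 */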
static void
ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
                                   struct ef4_farch_filter_table *table,
                                   unsigned int filter_idx)
{
        static ef4_oword_t filter;

        EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
        BUG_ON(table->offset == 0); /* can't clear MAC default filters */

        __clear_bit(filter_idx, table->used_bitmap);
        --table->used;
        memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

        ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);

        /* If this filter required a greater search depth than
         * any other, the search limit for its type can now be
         * decreased.  However, it is hard to determine that
         * unless the table has become completely empty - in
         * which case, all its search limits can be set to 0.
         */
        if (unlikely(table->used == 0)) {
                memset(table->search_limit, 0, sizeof(table->search_limit));
                if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
                        ef4_farch_filter_push_tx_limits(efx);
                else
                        ef4_farch_filter_push_rx_config(efx);
        }
}
static int ef4_farch_filter_remove(struct ef4_nic *efx,
                                   struct ef4_farch_filter_table *table,
                                   unsigned int filter_idx,
                                   enum ef4_filter_priority priority)
{
        struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];

        if (!test_bit(filter_idx, table->used_bitmap) ||
            spec->priority != priority)
                return -ENOENT;

        if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
                ef4_farch_filter_init_rx_auto(efx, spec);
                ef4_farch_filter_push_rx_config(efx);
        } else {
                ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
        }

        return 0;
}
int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
                                 enum ef4_filter_priority priority,
                                 u32 filter_id)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;
        struct ef4_farch_filter_table *table;
        unsigned int filter_idx;
        struct ef4_farch_filter_spec *spec;
        int rc;

        table_id = ef4_farch_filter_id_table_id(filter_id);
        if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
                return -ENOENT;
        table = &state->table[table_id];

        filter_idx = ef4_farch_filter_id_index(filter_id);
        if (filter_idx >= table->size)
                return -ENOENT;
        spec = &table->spec[filter_idx];

        spin_lock_bh(&efx->filter_lock);
        rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
        spin_unlock_bh(&efx->filter_lock);

        return rc;
}
int ef4_farch_filter_get_safe(struct ef4_nic *efx,
                              enum ef4_filter_priority priority,
                              u32 filter_id, struct ef4_filter_spec *spec_buf)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;
        struct ef4_farch_filter_table *table;
        struct ef4_farch_filter_spec *spec;
        unsigned int filter_idx;
        int rc;

        table_id = ef4_farch_filter_id_table_id(filter_id);
        if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
                return -ENOENT;
        table = &state->table[table_id];

        filter_idx = ef4_farch_filter_id_index(filter_id);
        if (filter_idx >= table->size)
                return -ENOENT;
        spec = &table->spec[filter_idx];

        spin_lock_bh(&efx->filter_lock);

        if (test_bit(filter_idx, table->used_bitmap) &&
            spec->priority == priority) {
                ef4_farch_filter_to_gen_spec(spec_buf, spec);
                rc = 0;
        } else {
                rc = -ENOENT;
        }

        spin_unlock_bh(&efx->filter_lock);

        return rc;
}
static void
ef4_farch_filter_table_clear(struct ef4_nic *efx,
                             enum ef4_farch_filter_table_id table_id,
                             enum ef4_filter_priority priority)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        struct ef4_farch_filter_table *table = &state->table[table_id];
        unsigned int filter_idx;

        spin_lock_bh(&efx->filter_lock);
        for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
                if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
                        ef4_farch_filter_remove(efx, table,
                                                filter_idx, priority);
        }
        spin_unlock_bh(&efx->filter_lock);
}

int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
                              enum ef4_filter_priority priority)
{
        ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
                                     priority);
        ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
                                     priority);
        ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
                                     priority);
        return 0;
}
u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
                                   enum ef4_filter_priority priority)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;
        struct ef4_farch_filter_table *table;
        unsigned int filter_idx;
        u32 count = 0;

        spin_lock_bh(&efx->filter_lock);

        for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
             table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
             table_id++) {
                table = &state->table[table_id];
                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
                        if (test_bit(filter_idx, table->used_bitmap) &&
                            table->spec[filter_idx].priority == priority)
                                ++count;
                }
        }

        spin_unlock_bh(&efx->filter_lock);

        return count;
}
s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
                                enum ef4_filter_priority priority,
                                u32 *buf, u32 size)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;
        struct ef4_farch_filter_table *table;
        unsigned int filter_idx;
        s32 count = 0;

        spin_lock_bh(&efx->filter_lock);

        for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
             table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
             table_id++) {
                table = &state->table[table_id];
                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
                        if (test_bit(filter_idx, table->used_bitmap) &&
                            table->spec[filter_idx].priority == priority) {
                                if (count == size) {
                                        count = -EMSGSIZE;
                                        goto out;
                                }
                                buf[count++] = ef4_farch_filter_make_id(
                                        &table->spec[filter_idx], filter_idx);
                        }
                }
        }
out:
        spin_unlock_bh(&efx->filter_lock);

        return count;
}
/* Restore filter state after reset */
void ef4_farch_filter_table_restore(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;
        struct ef4_farch_filter_table *table;
        ef4_oword_t filter;
        unsigned int filter_idx;

        spin_lock_bh(&efx->filter_lock);

        for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
                table = &state->table[table_id];

                /* Check whether this is a regular register table */
                if (table->step == 0)
                        continue;

                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
                        if (!test_bit(filter_idx, table->used_bitmap))
                                continue;
                        ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
                        ef4_writeo(efx, &filter,
                                   table->offset + table->step * filter_idx);
                }
        }

        ef4_farch_filter_push_rx_config(efx);
        ef4_farch_filter_push_tx_limits(efx);

        spin_unlock_bh(&efx->filter_lock);
}
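
/* Restore rewrites every in-use hardware entry from the software copy in
 * table->spec[], so that array is the authoritative record of the filter
 * configuration across a reset.
 */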
void ef4_farch_filter_table_remove(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;

        for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
                kfree(state->table[table_id].used_bitmap);
                vfree(state->table[table_id].spec);
        }
        kfree(state);
}
int ef4_farch_filter_table_probe(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state;
        struct ef4_farch_filter_table *table;
        unsigned table_id;

        state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;
        efx->filter_state = state;

        if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
                table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
                table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
                table->offset = FR_BZ_RX_FILTER_TBL0;
                table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
                table->step = FR_BZ_RX_FILTER_TBL0_STEP;
        }

        for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
                table = &state->table[table_id];
                if (table->size == 0)
                        continue;
                table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
                                             sizeof(unsigned long),
                                             GFP_KERNEL);
                if (!table->used_bitmap)
                        goto fail;
                table->spec = vzalloc(array_size(sizeof(*table->spec),
                                                 table->size));
                if (!table->spec)
                        goto fail;
        }

        table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
        if (table->size) {
                /* RX default filters must always exist */
                struct ef4_farch_filter_spec *spec;
                unsigned i;

                for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
                        spec = &table->spec[i];
                        spec->type = EF4_FARCH_FILTER_UC_DEF + i;
                        ef4_farch_filter_init_rx_auto(efx, spec);
                        __set_bit(i, table->used_bitmap);
                }
        }

        ef4_farch_filter_push_rx_config(efx);

        return 0;

fail:
        ef4_farch_filter_table_remove(efx);
        return -ENOMEM;
}
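
/* Only Falcon B0 and later parts are given an RX_IP table here; on older
 * revisions every table keeps size 0, so the allocation loop above and
 * the per-table loops elsewhere in this file simply skip them.
 */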
/* Update scatter enable flags for filters pointing to our own RX queues */
void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        enum ef4_farch_filter_table_id table_id;
        struct ef4_farch_filter_table *table;
        ef4_oword_t filter;
        unsigned int filter_idx;

        spin_lock_bh(&efx->filter_lock);

        for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
             table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
             table_id++) {
                table = &state->table[table_id];

                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
                        if (!test_bit(filter_idx, table->used_bitmap) ||
                            table->spec[filter_idx].dmaq_id >=
                            efx->n_rx_channels)
                                continue;

                        if (efx->rx_scatter)
                                table->spec[filter_idx].flags |=
                                        EF4_FILTER_FLAG_RX_SCATTER;
                        else
                                table->spec[filter_idx].flags &=
                                        ~EF4_FILTER_FLAG_RX_SCATTER;

                        if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
                                /* Pushed by ef4_farch_filter_push_rx_config() */
                                continue;

                        ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
                        ef4_writeo(efx, &filter,
                                   table->offset + table->step * filter_idx);
                }
        }

        ef4_farch_filter_push_rx_config(efx);

        spin_unlock_bh(&efx->filter_lock);
}
#ifdef CONFIG_RFS_ACCEL

s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
                                struct ef4_filter_spec *gen_spec)
{
        return ef4_farch_filter_insert(efx, gen_spec, true);
}

bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
                                     unsigned int index)
{
        struct ef4_farch_filter_state *state = efx->filter_state;
        struct ef4_farch_filter_table *table =
                &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];

        if (test_bit(index, table->used_bitmap) &&
            table->spec[index].priority == EF4_FILTER_PRI_HINT &&
            rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
                                flow_id, index)) {
                ef4_farch_filter_table_clear_entry(efx, table, index);
                return true;
        }

        return false;
}

#endif /* CONFIG_RFS_ACCEL */
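
/* ef4_farch_filter_rfs_expire_one() only ever removes entries at
 * EF4_FILTER_PRI_HINT, so filters inserted at higher priorities are
 * never aged out by the RFS expiry path.
 */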
void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        struct netdev_hw_addr *ha;
        union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
        u32 crc;
        int bit;

        if (!ef4_dev_registered(efx))
                return;

        netif_addr_lock_bh(net_dev);

        efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

        /* Build multicast hash table */
        if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                memset(mc_hash, 0xff, sizeof(*mc_hash));
        } else {
                memset(mc_hash, 0x00, sizeof(*mc_hash));
                netdev_for_each_mc_addr(ha, net_dev) {
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
                        __set_bit_le(bit, mc_hash);
                }

                /* Broadcast packets go through the multicast hash filter.
                 * ether_crc_le() of the broadcast address is 0xbe2612ff
                 * so we always add bit 0xff to the mask.
                 */
                __set_bit_le(0xff, mc_hash);
        }

        netif_addr_unlock_bh(net_dev);
}