/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "farch_regs.h"
#include "siena_sriov.h"
#include "workarounds.h"
/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
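
/* Editorial note (not in the original source): the _ORDER values encode the
 * cache sizes as 8 << order (16 == 8 << 1, 64 == 8 << 3); this relationship
 * is asserted with BUILD_BUG_ON() in efx_farch_init_common() below.
 */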
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
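
/* Editorial example (not in the original source): with these values,
 * efx_farch_fatal_interrupt() below schedules RESET_TYPE_INT_ERROR for the
 * first four internal errors inside a 3600 s window and escalates to
 * RESET_TYPE_DISABLE on the fifth.
 */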
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
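
/* Editorial note (not in the original source): this value bounds
 * efx->rxq_flush_outstanding in efx_farch_do_flush()/efx_farch_flush_wake()
 * below, so at most four RX flush requests are in flight at once.
 */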
/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
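
/* Worked example (editorial addition, not in the original source): for
 * channel 3, EFX_CHANNEL_MAGIC_TEST() expands to
 * _EFX_CHANNEL_MAGIC(0x000101, 3) == (0x000101 << 8) | 3 == 0x00010103,
 * and _EFX_CHANNEL_MAGIC_CODE(0x00010103) recovers the code 0x000101.
 */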
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
/**************************************************************************
 *
 **************************************************************************/
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
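
/* Sizing example (editorial addition, assuming the 4KB EFX_BUF_SIZE unit
 * described above): a 4096-entry descriptor ring of 8-byte efx_qword_t
 * entries needs 32KB, so efx_alloc_special_buffer() reserves 32KB / 4KB = 8
 * consecutive buffer table entries starting at efx->next_buffer_table.
 */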
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/
/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor queue.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}
void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}
static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_siena_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}
/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete())
 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously. Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;

	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here
		 */
		efx_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}
static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_tx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_rx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
*channel
, int budget
)
1284 struct efx_nic
*efx
= channel
->efx
;
1285 unsigned int read_ptr
;
1286 efx_qword_t event
, *p_event
;
1294 read_ptr
= channel
->eventq_read_ptr
;
1297 p_event
= efx_event(channel
, read_ptr
);
1300 if (!efx_event_present(&event
))
1304 netif_vdbg(channel
->efx
, intr
, channel
->efx
->net_dev
,
1305 "channel %d event is "EFX_QWORD_FMT
"\n",
1306 channel
->channel
, EFX_QWORD_VAL(event
));
1308 /* Clear this event by marking it all ones */
1309 EFX_SET_QWORD(*p_event
);
1313 ev_code
= EFX_QWORD_FIELD(event
, FSF_AZ_EV_CODE
);
1316 case FSE_AZ_EV_CODE_RX_EV
:
1317 efx_farch_handle_rx_event(channel
, &event
);
1318 if (++spent
== budget
)
1321 case FSE_AZ_EV_CODE_TX_EV
:
1322 tx_packets
+= efx_farch_handle_tx_event(channel
,
1324 if (tx_packets
> efx
->txq_entries
) {
1329 case FSE_AZ_EV_CODE_DRV_GEN_EV
:
1330 efx_farch_handle_generated_event(channel
, &event
);
1332 case FSE_AZ_EV_CODE_DRIVER_EV
:
1333 efx_farch_handle_driver_event(channel
, &event
);
1335 #ifdef CONFIG_SFC_SRIOV
1336 case FSE_CZ_EV_CODE_USER_EV
:
1337 efx_siena_sriov_event(channel
, &event
);
1340 case FSE_CZ_EV_CODE_MCDI_EV
:
1341 efx_mcdi_process_event(channel
, &event
);
1343 case FSE_AZ_EV_CODE_GLOBAL_EV
:
1344 if (efx
->type
->handle_global_event
&&
1345 efx
->type
->handle_global_event(channel
, &event
))
1347 /* else fall through */
1349 netif_err(channel
->efx
, hw
, channel
->efx
->net_dev
,
1350 "channel %d unknown event type %d (data "
1351 EFX_QWORD_FMT
")\n", channel
->channel
,
1352 ev_code
, EFX_QWORD_VAL(event
));
1357 channel
->eventq_read_ptr
= read_ptr
;
/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}
int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}
void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}
/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}
void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}
void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}
/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
	return 0;
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;
	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i;
	efx_dword_t dword;

	BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx)) {
			unsigned vi_dc_entries, buftbl_free;
			unsigned entries_per_vf, vf_limit;

			nic_data->vf_buftbl_base = buftbl_min;

			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
			vi_count = max(vi_count, EFX_VI_BASE);
			buftbl_free = (sram_lim_qw - buftbl_min -
				       vi_count * vi_dc_entries);

			entries_per_vf = ((vi_dc_entries +
					   EFX_VF_BUFTBL_PER_VI) *
					  efx_vf_size(efx));
			vf_limit = min(buftbl_free / entries_per_vf,
				       (1024U - EFX_VI_BASE) >> efx->vi_scale);

			if (efx->vf_count > vf_limit) {
				netif_err(efx, probe, efx->net_dev,
					  "Reducing VF count from from %d to %d\n",
					  efx->vf_count, vf_limit);
				efx->vf_count = vf_limit;
			}
			vi_count += efx->vf_count * efx_vf_size(efx);
		}
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};
enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};
1877 enum efx_farch_filter_index
{
1878 EFX_FARCH_FILTER_INDEX_UC_DEF
,
1879 EFX_FARCH_FILTER_INDEX_MC_DEF
,
1880 EFX_FARCH_FILTER_SIZE_RX_DEF
,
struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};
struct efx_farch_filter_table {
	enum efx_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_farch_filter_spec *spec;
	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};

struct efx_farch_filter_state {
	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
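
/* Example (illustrative): together, the hash and increment define an
 * open-addressed probe sequence, assuming table->size is a power of two
 * (the hardware filter tables are):
 *
 *	u32 key = efx_farch_filter_build(&filter, &spec);
 *	unsigned int i = efx_farch_filter_hash(key) & (table->size - 1);
 *	unsigned int incr = efx_farch_filter_increment(key);
 *	...
 *	i = (i + incr) & (table->size - 1);	(next slot after a collision)
 *
 * efx_farch_filter_insert() walks exactly this sequence when looking for a
 * free slot or an existing matching entry.
 */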
static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
	    gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		/* fall through */
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);

		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		/* fall through */
	case EFX_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
			      EFX_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EFX_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EFX_FARCH_FILTER_MC_DEF :
			      EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}
static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with efx_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_TCP_WILD:
	case EFX_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EFX_FILTER_MATCH_ETHER_TYPE |
			EFX_FILTER_MATCH_IP_PROTO |
			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}

		break;
	}

	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EFX_FARCH_FILTER_UC_DEF:
	case EFX_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
		break;

	default:
		WARN_ON(1);
		break;
	}
}
static void
efx_farch_filter_init_rx_auto(struct efx_nic *efx,
			      struct efx_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EFX_FILTER_PRI_AUTO;
	spec->flags = (EFX_FILTER_FLAG_RX |
		       (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{
	u32 data3;

	switch (efx_farch_filter_spec_table_id(spec)) {
	case EFX_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}
/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_MATCH_PRI_COUNT	5

static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
	[EFX_FARCH_FILTER_TCP_FULL]	= 0,
	[EFX_FARCH_FILTER_UDP_FULL]	= 0,
	[EFX_FARCH_FILTER_TCP_WILD]	= 1,
	[EFX_FARCH_FILTER_UDP_WILD]	= 1,
	[EFX_FARCH_FILTER_MAC_FULL]	= 2,
	[EFX_FARCH_FILTER_MAC_WILD]	= 3,
	[EFX_FARCH_FILTER_UC_DEF]	= 4,
	[EFX_FARCH_FILTER_MC_DEF]	= 4,
};

static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
	EFX_FARCH_FILTER_TABLE_RX_IP,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
};

#define EFX_FARCH_FILTER_INDEX_WIDTH 13
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = efx_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))
		range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;

	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{
	unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;

	if (range < ARRAY_SIZE(efx_farch_filter_range_table))
		return efx_farch_filter_range_table[range];
	else
		return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int efx_farch_filter_id_index(u32 id)
{
	return id & EFX_FARCH_FILTER_INDEX_MASK;
}
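
/* Example (illustrative): an ID packs the match-priority range above the
 * table index, so for a hypothetical entry at index 42 in RX match
 * priority 1:
 *
 *	u32 id = 1 << EFX_FARCH_FILTER_INDEX_WIDTH | 42;	(0x202a)
 *	efx_farch_filter_id_table_id(id)  -> EFX_FARCH_FILTER_TABLE_RX_IP
 *	efx_farch_filter_id_index(id)     -> 42
 */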
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}
s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0)
		return -EINVAL;

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;

		spin_lock_bh(&efx->filter_lock);
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *     with the same match values, up to the current
		 *     search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *     free slot before it or up to the maximum search
		 *     depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *     found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *     either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out;
		}
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
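
/* Note on the insertion path above: rep_index >= 0 means an entry with
 * identical match values was found within the current search depth and may
 * be replaced, subject to the priority checks; ins_index is the slot that
 * actually gets written.  A hint (priority <= EFX_FILTER_PRI_HINT) gives up
 * after EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX probes, anything else after
 * EFX_FARCH_FILTER_CTL_SRCH_MAX, and a successful insert at depth N raises
 * table->search_limit[spec.type] so the hardware search range (limit plus
 * fudge factor) still covers the new entry.
 */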
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static efx_oword_t filter;

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}
static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		efx_farch_filter_init_rx_auto(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}
int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	struct efx_farch_filter_spec *spec;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);
	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);

	return rc;
}
int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&efx->filter_lock);

	return rc;
}
static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
			efx_farch_filter_remove(efx, table,
						filter_idx, priority);
	}
	spin_unlock_bh(&efx->filter_lock);
}

int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
	return 0;
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				count++;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	return count;
}
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	spin_unlock_bh(&efx->filter_lock);

	return count;
}
/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	spin_unlock_bh(&efx->filter_lock);
}
void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}
int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
		table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
		table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
		table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_auto(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}
/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	spin_unlock_bh(&efx->filter_lock);
}
#ifdef CONFIG_RFS_ACCEL

s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
				struct efx_filter_spec *gen_spec)
{
	return efx_farch_filter_insert(efx, gen_spec, true);
}

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table =
		&state->table[EFX_FARCH_FILTER_TABLE_RX_IP];

	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		efx_farch_filter_table_clear_entry(efx, table, index);
		return true;
	}

	return false;
}

#endif /* CONFIG_RFS_ACCEL */
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	if (!efx_dev_registered(efx))
		return;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}
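
/* Illustrative note (assuming EFX_MCAST_HASH_ENTRIES is 256, as on the
 * Falcon/Siena multicast hash): the bit index used above is just the low
 * eight bits of the little-endian CRC of the MAC address,
 *
 *	crc = ether_crc_le(ETH_ALEN, addr);
 *	bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
 *
 * The broadcast address's CRC is 0xbe2612ff, which is why bit 0xff is set
 * unconditionally whenever any multicast addresses are accepted.
 */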