/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/in.h>
#include <net/ip.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
#include "nic.h"
#include "regs.h"
18 /* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to pipelined implementation we need to program H/W with a value that
20 * is larger than the hop limit we want.
22 #define FILTER_CTL_SRCH_FUDGE_WILD 3
23 #define FILTER_CTL_SRCH_FUDGE_FULL 1
/* Hard maximum hop limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_filter_search() when the
 * table is full.
 */
#define FILTER_CTL_SRCH_MAX 200
/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define FILTER_CTL_SRCH_HINT_MAX 5
enum efx_filter_table_id {
	EFX_FILTER_TABLE_RX_IP = 0,
	EFX_FILTER_TABLE_RX_MAC,
	EFX_FILTER_TABLE_COUNT,
};
struct efx_filter_table {
	enum efx_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_filter_spec *spec;
	unsigned	search_depth[EFX_FILTER_TYPE_COUNT];
};
struct efx_filter_state {
	spinlock_t	lock;
	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
#ifdef CONFIG_RFS_ACCEL
	u32		*rps_flow_id;
	unsigned	rps_expire_index;
#endif
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_filter_increment(u32 key)
{
	return key * 2 - 1;
}
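
/* Together, efx_filter_hash() and efx_filter_increment() define the probe
 * sequence used by efx_filter_search() below: the hash, masked to the table
 * size, selects the first candidate slot, and the increment is then added
 * (modulo the table size) on each further probe.
 */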
static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
	EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
	return spec->type >> 2;
}
static struct efx_filter_table *
efx_filter_spec_table(struct efx_filter_state *state,
		      const struct efx_filter_spec *spec)
{
	if (spec->type == EFX_FILTER_UNSPEC)
		return NULL;
	else
		return &state->table[efx_filter_spec_table_id(spec)];
}
static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
	memset(table->search_depth, 0, sizeof(table->search_depth));
}
static void efx_filter_push_rx_limits(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_FULL] +
			FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_WILD] +
			FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
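
/* Pack an IPv4 4-tuple into the three 32-bit data words of a filter
 * specification: data[0] carries the low 16 bits of host1 together with
 * port1, data[1] carries port2 and the high 16 bits of host1, and data[2]
 * carries host2, all converted to host byte order.
 */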
static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
					 __be32 host1, __be16 port1,
					 __be32 host2, __be16 port2)
{
	spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
	spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
	spec->data[2] = ntohl(host2);
}
static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
					 __be32 *host1, __be16 *port1,
					 __be32 *host2, __be16 *port2)
{
	*host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
	*port1 = htons(spec->data[0]);
	*host2 = htonl(spec->data[2]);
	*port2 = htons(spec->data[1] >> 16);
}
/**
 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
 * @spec: Specification to initialise
 * @proto: Transport layer protocol number
 * @host: Local host address (network byte order)
 * @port: Local port (network byte order)
 */
int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
			      __be32 host, __be16 port)
{
	__be32 host1;
	__be16 port1;

	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	if (port == 0)
		return -EINVAL;

	switch (proto) {
	case IPPROTO_TCP:
		spec->type = EFX_FILTER_TCP_WILD;
		break;
	case IPPROTO_UDP:
		spec->type = EFX_FILTER_UDP_WILD;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	/* Filter is constructed in terms of source and destination,
	 * with the odd wrinkle that the ports are swapped in a UDP
	 * wildcard filter.  We need to convert from local and remote
	 * (= zero for wildcard) addresses.
	 */
	host1 = 0;
	if (proto != IPPROTO_UDP) {
		port1 = 0;
	} else {
		port1 = port;
		port = 0;
	}

	__efx_filter_set_ipv4(spec, host1, port1, host, port);
	return 0;
}
int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
			      u8 *proto, __be32 *host, __be16 *port)
{
	__be32 host1;
	__be16 port1;

	switch (spec->type) {
	case EFX_FILTER_TCP_WILD:
		*proto = IPPROTO_TCP;
		__efx_filter_get_ipv4(spec, &host1, &port1, host, port);
		return 0;
	case EFX_FILTER_UDP_WILD:
		*proto = IPPROTO_UDP;
		__efx_filter_get_ipv4(spec, &host1, port, host, &port1);
		return 0;
	default:
		return -EINVAL;
	}
}
/**
 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
 * @spec: Specification to initialise
 * @proto: Transport layer protocol number
 * @host: Local host address (network byte order)
 * @port: Local port (network byte order)
 * @rhost: Remote host address (network byte order)
 * @rport: Remote port (network byte order)
 */
int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
			     __be32 host, __be16 port,
			     __be32 rhost, __be16 rport)
{
	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	if (port == 0 || rport == 0)
		return -EINVAL;

	switch (proto) {
	case IPPROTO_TCP:
		spec->type = EFX_FILTER_TCP_FULL;
		break;
	case IPPROTO_UDP:
		spec->type = EFX_FILTER_UDP_FULL;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	__efx_filter_set_ipv4(spec, rhost, rport, host, port);
	return 0;
}
int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
			     u8 *proto, __be32 *host, __be16 *port,
			     __be32 *rhost, __be16 *rport)
{
	switch (spec->type) {
	case EFX_FILTER_TCP_FULL:
		*proto = IPPROTO_TCP;
		break;
	case EFX_FILTER_UDP_FULL:
		*proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	__efx_filter_get_ipv4(spec, rhost, rport, host, port);
	return 0;
}
/**
 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
 * @spec: Specification to initialise
 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
 * @addr: Local Ethernet MAC address
 */
int efx_filter_set_eth_local(struct efx_filter_spec *spec,
			     u16 vid, const u8 *addr)
{
	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	if (vid == EFX_FILTER_VID_UNSPEC) {
		spec->type = EFX_FILTER_MAC_WILD;
		spec->data[0] = 0;
	} else {
		spec->type = EFX_FILTER_MAC_FULL;
		spec->data[0] = vid;
	}

	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
	spec->data[2] = addr[0] << 8 | addr[1];
	return 0;
}
int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
			     u16 *vid, u8 *addr)
{
	switch (spec->type) {
	case EFX_FILTER_MAC_WILD:
		*vid = EFX_FILTER_VID_UNSPEC;
		break;
	case EFX_FILTER_MAC_FULL:
		*vid = spec->data[0];
		break;
	default:
		return -EINVAL;
	}

	addr[0] = spec->data[2] >> 8;
	addr[1] = spec->data[2];
	addr[2] = spec->data[1] >> 24;
	addr[3] = spec->data[1] >> 16;
	addr[4] = spec->data[1] >> 8;
	addr[5] = spec->data[1];
	return 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
	u32 data3;

	switch (efx_filter_spec_table_id(spec)) {
	case EFX_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
			       spec->type == EFX_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_8(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_IP_OVERRIDE,
			!!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
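
/* The key returned by efx_filter_build() is what efx_filter_insert_filter()
 * passes to efx_filter_search(), where it is hashed to locate the entry in
 * the hardware table.
 */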
static bool efx_filter_equal(const struct efx_filter_spec *left,
			     const struct efx_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	return true;
}
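
/* efx_filter_search() implements open addressing over a table whose size is
 * a power of two: starting from the hashed slot it advances by the per-key
 * increment until it finds a matching entry (or, when inserting, a free
 * slot), and gives up once the permitted search depth is reached.
 */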
static int efx_filter_search(struct efx_filter_table *table,
			     struct efx_filter_spec *spec, u32 key,
			     bool for_insert, unsigned int *depth_required)
{
	unsigned hash, incr, filter_idx, depth, depth_max;

	hash = efx_filter_hash(key);
	incr = efx_filter_increment(key);

	filter_idx = hash & (table->size - 1);
	depth = 1;
	depth_max = (for_insert ?
		     (spec->priority <= EFX_FILTER_PRI_HINT ?
		      FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
		     table->search_depth[spec->type]);

	for (;;) {
		/* Return success if entry is used and matches this spec
		 * or entry is unused and we are trying to insert.
		 */
		if (test_bit(filter_idx, table->used_bitmap) ?
		    efx_filter_equal(spec, &table->spec[filter_idx]) :
		    for_insert) {
			*depth_required = depth;
			return filter_idx;
		}

		/* Return failure if we reached the maximum search depth */
		if (depth == depth_max)
			return for_insert ? -EBUSY : -ENOENT;

		filter_idx = (filter_idx + incr) & (table->size - 1);
		++depth;
	}
}
/*
 * Construct/deconstruct external filter IDs.  These must be ordered
 * by matching priority, for RX NFC semantics.
 *
 * Each RX MAC filter entry has a flag for whether it can override an
 * RX IP filter that also matches.  So we assign locations for MAC
 * filters with overriding behaviour, then for IP filters, then for
 * MAC filters without overriding behaviour.
 */

#define EFX_FILTER_INDEX_WIDTH	13
#define EFX_FILTER_INDEX_MASK	((1 << EFX_FILTER_INDEX_WIDTH) - 1)

static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
				     unsigned int index, u8 flags)
{
	return (table_id == EFX_FILTER_TABLE_RX_MAC &&
		flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
		index :
		(table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
{
	return (id <= EFX_FILTER_INDEX_MASK) ?
		EFX_FILTER_TABLE_RX_MAC :
		(id >> EFX_FILTER_INDEX_WIDTH) - 1;
}

static inline unsigned int efx_filter_id_index(u32 id)
{
	return id & EFX_FILTER_INDEX_MASK;
}

static inline u8 efx_filter_id_flags(u32 id)
{
	return (id <= EFX_FILTER_INDEX_MASK) ?
		EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
		EFX_FILTER_FLAG_RX;
}
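
/* With EFX_FILTER_INDEX_WIDTH = 13 this gives three contiguous ID ranges:
 * 0..8191 for MAC filters with IP-override, 8192..16383 for IP filters and
 * 16384..24575 for MAC filters without IP-override, matching the priority
 * ordering described above.
 */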
u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;

	if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
		return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
			+ state->table[EFX_FILTER_TABLE_RX_MAC].size;
	else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
		return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
			+ state->table[EFX_FILTER_TABLE_RX_IP].size;
	else
		return 0;
}
/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace: Flag for whether the specified filter may replace a filter
 *	with an identical match expression and equal or lower priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 */
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
			     bool replace)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
	struct efx_filter_spec *saved_spec;
	efx_oword_t filter;
	unsigned int filter_idx, depth;
	u32 key;
	int rc;

	if (!table || table->size == 0)
		return -EINVAL;

	key = efx_filter_build(&filter, spec);

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_depth=%d", __func__, spec->type,
		   table->search_depth[spec->type]);

	spin_lock_bh(&state->lock);

	rc = efx_filter_search(table, spec, key, true, &depth);
	if (rc < 0)
		goto out;
	filter_idx = rc;
	BUG_ON(filter_idx >= table->size);
	saved_spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap)) {
		/* Should we replace the existing filter? */
		if (!replace) {
			rc = -EEXIST;
			goto out;
		}
		if (spec->priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
	} else {
		__set_bit(filter_idx, table->used_bitmap);
		++table->used;
	}
	*saved_spec = *spec;

	if (table->search_depth[spec->type] < depth) {
		table->search_depth[spec->type] = depth;
		efx_filter_push_rx_limits(efx);
	}

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec->type, filter_idx, spec->dmaq_id);
	rc = efx_filter_make_id(table->id, filter_idx, spec->flags);

out:
	spin_unlock_bh(&state->lock);
	return rc;
}
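
/* Typical caller flow (a sketch; see efx_filter_rfs() below for a real user):
 *
 *	struct efx_filter_spec spec;
 *	int rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, host, port);
 *	if (rc == 0)
 *		rc = efx_filter_insert_filter(efx, &spec, true);
 */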
static void efx_filter_table_clear_entry(struct efx_nic *efx,
					 struct efx_filter_table *table,
					 unsigned int filter_idx)
{
	static efx_oword_t filter;

	if (test_bit(filter_idx, table->used_bitmap)) {
		__clear_bit(filter_idx, table->used_bitmap);
		--table->used;
		memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}
}
/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
int efx_filter_remove_id_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	struct efx_filter_spec *spec;
	u8 filter_flags;
	int rc;

	table_id = efx_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	filter_flags = efx_filter_id_flags(filter_id);

	spin_lock_bh(&state->lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority && spec->flags == filter_flags) {
		efx_filter_table_clear_entry(efx, table, filter_idx);
		if (table->used == 0)
			efx_filter_table_reset_search_depth(table);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&state->lock);

	return rc;
}
/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec_buf: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
int efx_filter_get_filter_safe(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	u8 filter_flags;
	int rc;

	table_id = efx_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	filter_flags = efx_filter_id_flags(filter_id);

	spin_lock_bh(&state->lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority && spec->flags == filter_flags) {
		*spec_buf = *spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&state->lock);

	return rc;
}
static void efx_filter_table_clear(struct efx_nic *efx,
				   enum efx_filter_table_id table_id,
				   enum efx_filter_priority priority)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
		if (table->spec[filter_idx].priority <= priority)
			efx_filter_table_clear_entry(efx, table, filter_idx);
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
}
/**
 * efx_filter_clear_rx - remove RX filters by priority
 * @efx: NIC from which to remove the filters
 * @priority: Maximum priority to remove
 */
void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
{
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
u32 efx_filter_count_rx_used(struct efx_nic *efx,
			     enum efx_filter_priority priority)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&state->lock);

	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_MAC;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				count++;
		}
	}

	spin_unlock_bh(&state->lock);

	return count;
}
s32 efx_filter_get_rx_ids(struct efx_nic *efx,
			  enum efx_filter_priority priority,
			  u32 *buf, u32 size)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&state->lock);

	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_MAC;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_filter_make_id(
					table_id, filter_idx,
					table->spec[filter_idx].flags);
			}
		}
	}
out:
	spin_unlock_bh(&state->lock);

	return count;
}
/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_filter_push_rx_limits(efx);

	spin_unlock_bh(&state->lock);
}
int efx_probe_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state;
	struct efx_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	spin_lock_init(&state->lock);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
#ifdef CONFIG_RFS_ACCEL
		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
					     sizeof(*state->rps_flow_id),
					     GFP_KERNEL);
		if (!state->rps_flow_id)
			goto fail;
#endif
		table = &state->table[EFX_FILTER_TABLE_RX_IP];
		table->id = EFX_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FILTER_TABLE_RX_MAC];
		table->id = EFX_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	return 0;

fail:
	efx_remove_filters(efx);
	return -ENOMEM;
}
void efx_remove_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
#ifdef CONFIG_RFS_ACCEL
	kfree(state->rps_flow_id);
#endif
	kfree(state);
}
#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	state->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}
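
/* Scan up to @quota entries of the RX IP filter table and remove any
 * EFX_FILTER_PRI_HINT filters whose flows the stack reports as expired.
 * Returns false (without scanning) if the filter lock is contended.
 */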
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
	unsigned mask = table->size - 1;
	unsigned index;
	unsigned stop;

	if (!spin_trylock_bh(&state->lock))
		return false;

	index = state->rps_expire_index;
	stop = (index + quota) & mask;

	while (index != stop) {
		if (test_bit(index, table->used_bitmap) &&
		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
		    rps_may_expire_flow(efx->net_dev,
					table->spec[index].dmaq_id,
					state->rps_flow_id[index], index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expiring filter %d [flow %u]\n",
				   index, state->rps_flow_id[index]);
			efx_filter_table_clear_entry(efx, table, index);
		}
		index = (index + 1) & mask;
	}

	state->rps_expire_index = stop;
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);

	return true;
}

#endif /* CONFIG_RFS_ACCEL */