// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 ****************************************************************************/

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)
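
/* Worked example (illustrative numbers only, not the driver's actual
 * constants): a 9216-byte maximum frame received into 1792-byte buffers
 * needs DIV_ROUND_UP(9216, 1792) = 6 buffers, so a single packet may have
 * to be reassembled from up to EFX_RX_MAX_FRAGS fragments.
 */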

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
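
/* Handle a received packet, first half: called from event processing.  This
 * validates the fragment count and completed length, syncs and recycles the
 * DMA-mapped buffers, and records the buffer index and fragment count on the
 * channel for __efx_rx_packet() to complete delivery later.
 */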

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/** efx_do_xdp: perform XDP processing on a received packet
 *
 * Returns true if packet should still be delivered.
 */
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	rcu_read_lock();
	xdp_prog = rcu_dereference(efx->xdp_prog);
	if (!xdp_prog) {
		rcu_read_unlock();
		return true;
	}

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
		/* We can't do XDP on fragmented packets - drop. */
		rcu_read_unlock();
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);

	/* Save the rx prefix. */
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);

	xdp.data = *ehp;
	xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;

	/* No support yet for XDP metadata */
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + rx_buf->len;
	xdp.rxq = &rx_queue->xdp_rxq_info;

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
		/* Fix up rx prefix. */
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
		/* Buffer ownership passes to tx on success. */
		xdpf = convert_to_xdp_frame(&xdp);
		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		/* Fall through */
	case XDP_DROP:
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;
	}

	return xdp_act == XDP_PASS;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

#ifdef CONFIG_RFS_ACCEL
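
/* Accelerated RFS (ARFS) filter insertion is deferred to this work item:
 * the flow-steering hook below runs in the packet receive path, while the
 * hardware filter insert may sleep.
 */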

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}
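
/* The driver's .ndo_rx_flow_steer() handler: dissect the skb into a filter
 * spec for its flow, record it in the ARFS hash table if present, and queue
 * an asynchronous insertion request in a free rps_slot.
 */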

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */