// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
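/* TX_BATCH_SIZE bounds how many descriptors one pass of the generic (copy
 * mode) transmit path handles before returning to the caller, and
 * MAX_PER_SOCKET_BUDGET bounds how many descriptors a single socket may
 * consume before other sockets sharing the same buffer pool get a turn.
 */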
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}
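/* Post a single zero-copy buffer to the socket's Rx ring. The descriptor
 * written to the ring refers directly into umem, so no packet data is
 * copied on this path.
 */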
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb, xskb->pool);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}
static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;
		list_del(&pos->list_node);
	}

	return 0;
err:
	xsk_buff_free(xdp);
	return err;
}
static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}
static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (copied >= rem)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}

		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}
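/* Copy-mode receive: copy the packet (and any metadata in front of it) out
 * of the driver's buffer into one or more buffers allocated from the
 * socket's pool, then post those buffers to the Rx ring.
 */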
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}

		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}
static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}
static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	return 0;
}
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev) {
		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();

		list_add(&xs->flush_node, flush_list);
	}

	return 0;
}
void __xsk_map_flush(struct list_head *flush_list)
{
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);
void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
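/* Hand out the next Tx descriptor, iterating over all sockets that share
 * this pool. Each socket gets at most MAX_PER_SOCKET_BUDGET descriptors
 * before the others are given a chance, so a single busy socket cannot
 * starve the rest.
 */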
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	bool budget_exhausted = false;
	struct xdp_sock *xs;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}

		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		xs->tx_budget_spent++;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(pool->cq, addr);
	spin_unlock_irqrestore(&pool->cq_lock, flags);

	return ret;
}
static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_submit_n(pool->cq, n);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}
static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_cancel_n(pool->cq, n);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}
static void xsk_destruct_skb(struct sk_buff *skb)
{
	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

	if (compl->tx_timestamp) {
		/* sw completion timestamp, not a real one */
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
	sock_wfree(skb);
}
static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}
static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}
static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}
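/* Build an skb whose fragments point directly at umem pages instead of
 * copying the payload. Only usable on devices that advertise
 * IFF_TX_SKB_NO_LINEAR.
 */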
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta = NULL;
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	bool first_frag = false;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			first_frag = true;

			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err))
				goto free_err;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
		}

		if (first_frag && desc->options & XDP_TX_METADATA) {
			if (unlikely(xs->pool->tx_metadata_len == 0)) {
				err = -EINVAL;
				goto free_err;
			}

			meta = buffer - xs->pool->tx_metadata_len;
			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
				err = -EINVAL;
				goto free_err;
			}

			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
				if (unlikely(meta->request.csum_start +
					     meta->request.csum_offset +
					     sizeof(__sum16) > len)) {
					err = -EINVAL;
					goto free_err;
				}

				skb->csum_start = hr + meta->request.csum_start;
				skb->csum_offset = meta->request.csum_offset;
				skb->ip_summed = CHECKSUM_PARTIAL;

				if (unlikely(xs->pool->tx_sw_csum)) {
					err = skb_checksum_help(skb);
					if (err)
						goto free_err;
				}
			}
		}
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(xs->sk.sk_priority);
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (first_frag && skb)
		kfree_skb(skb);

	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs->pool, 1);
	}

	return ERR_PTR(err);
}
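/* Generic (copy mode) transmit: peel descriptors off the Tx ring, build an
 * skb for each packet and hand it directly to the driver queue this socket
 * is bound to.
 */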
static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xs->skb = NULL;
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}
static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}
static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}
static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}
static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}
static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}
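/* poll() reports EPOLLIN when the Rx ring has entries to consume and
 * EPOLLOUT while the Tx ring still has room; when the driver asked for a
 * wakeup it also kicks the driver, and in copy mode it drives transmission
 * itself.
 */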
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;
out:
	rcu_read_unlock();
	return mask;
}
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}
static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
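/* Bind the socket to a device and queue id. Depending on the flags this
 * either creates a buffer pool for the socket's own umem or shares the
 * pool/umem of an already bound socket (XDP_SHARED_UMEM).
 */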
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes.
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

	if (xs->zc && qid < dev->real_num_rx_queues) {
		struct netdev_rx_queue *rxq;

		rxq = __netif_get_rx_queue(dev, qid);
		if (rxq->napi)
			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
	}

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
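/* Socket options: ring sizes for the Rx, Tx, fill and completion rings and
 * registration of the umem are all configured here, before bind().
 */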
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));

		/* Make sure the last field of the struct doesn't have
		 * uninitialized padding. All padding has to be explicit
		 * and has to be set to zero by the userspace to make
		 * struct xdp_umem_reg extensible in the future.
		 */
		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
			     sizeof(struct xdp_umem_reg));

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}
static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
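/* mmap() handler: map the ring selected by the page offset (Rx, Tx, fill or
 * completion ring) into the caller's address space.
 */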
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};
static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};
static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};
static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}
static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}
static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);