// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
	xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);
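/* Usage sketch (illustrative only): a user-space process that bound with
 * XDP_USE_NEED_WAKEUP is expected to test the XDP_RING_NEED_WAKEUP bit in a
 * ring's flags word before issuing a wakeup syscall, e.g.:
 *
 *	if (fill_ring_flags & XDP_RING_NEED_WAKEUP)
 *		recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
 *	if (tx_ring_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * The helpers above are what set and clear that bit from the kernel side;
 * fill_ring_flags/tx_ring_flags stand in for the mmapped flags fields
 * exposed through XDP_MMAP_OFFSETS.
 */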
/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}
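/* Worked example for the split above (illustrative only, PAGE_SIZE == 4096):
 * if the copy destination starts at offset 4000 into a page, page_start is
 * that page's base, first_len is 96, so the first memcpy fills the last
 * bytes of the current page and the second memcpy continues at
 * next_pg_addr, the start of the following, non-contiguous page.
 */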
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (!err) {
		xskq_cons_release(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_cons_release(xs->umem->fq);
	xskq_prod_submit(xs->rx);
	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}
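/* Layout note for the copy path above: the frame is copied to the chunk
 * address advanced by the configured headroom, with data_meta first and the
 * packet data right after it, and the descriptor posted to the Rx ring is
 * then advanced by metalen so that user space sees addr pointing at the
 * packet data with its metadata immediately in front.
 */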
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
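/* Note on the redirect path: an XDP program returning XDP_REDIRECT into an
 * XSKMAP lands in __xsk_map_redirect(), which receives the frame via
 * xsk_rcv() and queues the socket on the per-cpu xskmap_flush_list; the
 * flush hook run at the end of the driver's NAPI poll then reaches
 * __xsk_map_flush(), which publishes the new Rx descriptors and wakes each
 * queued socket once per batch.
 */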
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
			continue;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
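/* Driver-side sketch (illustrative only; queue_to_hw() and the completion
 * count are placeholders for driver-specific code): a zero-copy Tx path is
 * expected to pair these exports roughly as follows:
 *
 *	struct xdp_desc desc;
 *
 *	while (hw_slots_free && xsk_umem_consume_tx(umem, &desc))
 *		queue_to_hw(umem, desc.addr, desc.len);
 *
 *	// later, from the Tx completion handler:
 *	xsk_umem_complete_tx(umem, nb_completed);
 *	xsk_umem_consume_tx_done(umem);
 */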
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
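/* Note on copy-mode Tx completion: the descriptor address is stashed in
 * skb_shinfo()->destructor_arg above, so the completion queue slot reserved
 * before transmission is only published by xsk_destruct_skb() once the skb
 * is freed. That reservation bounds the number of in-flight frames and makes
 * the TX_BATCH_SIZE'd loop self-throttling.
 */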
static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;

	if (umem->need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
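/* Usage sketch (illustrative only): with XDP_USE_NEED_WAKEUP, an application
 * can drive both directions through poll() alone:
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLIN)
 *			drain_rx_ring();
 *		if (pfd.revents & POLLOUT)
 *			kick_tx_ring();
 *	}
 *
 * drain_rx_ring()/kick_tx_ring() are placeholders; xsk_poll() above performs
 * the driver (or generic copy-mode) wakeup and reports ring state through
 * EPOLLIN/EPOLLOUT.
 */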
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
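/* Note: ring sizes handed in through the setsockopts below must be non-zero
 * powers of two (for example 2048 or 4096 descriptors), and each ring can
 * only be created once per socket; everything else is rejected here with
 * -EINVAL.
 */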
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * If zero-copy mode, use the DMA address to do the page contiguity check
 * For all other modes we use addr (kernel virtual address)
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}
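/* Note (summary, see xsk_queue.h for the consumer side): the contiguity
 * result is OR'ed into the otherwise unused low bits of the stored page
 * address via XSK_NEXT_PG_CONTIG_SHIFT, so per-descriptor checks such as
 * xskq_cons_crosses_non_contig_pg() can decide whether a chunk may spill
 * into the following page without doing a second page lookup.
 */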
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
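/* Usage sketch (illustrative only; interface name and queue are
 * placeholders):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * The umem and at least one of the Rx/Tx rings must already have been set up
 * via setsockopt() before the bind; XDP_SHARED_UMEM instead points
 * sxdp_shared_umem_fd at an already-bound socket on the same device and
 * queue.
 */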
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
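/* Note: this _v1 copy of the uapi structure only exists so XDP_UMEM_REG can
 * keep accepting the shorter layout used by older applications;
 * xsk_setsockopt() below picks the copy size based on optlen.
 */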
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
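/* Usage sketch (illustrative only; sizes and the umem_area pointer are
 * placeholders):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,
 *		.len = umem_size,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int ring_size = 2048;
 *
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_size, sizeof(ring_size));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_size, sizeof(ring_size));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &ring_size, sizeof(ring_size));
 */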
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
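/* Usage sketch (illustrative only): the ring offsets are queried with
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *
 * and optlen coming back as sizeof(struct xdp_mmap_offsets_v1) tells the
 * application it is running on a kernel without the ring flags field.
 */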
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
*this,
1045 unsigned long msg
, void *ptr
)
1047 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1048 struct net
*net
= dev_net(dev
);
1052 case NETDEV_UNREGISTER
:
1053 mutex_lock(&net
->xdp
.lock
);
1054 sk_for_each(sk
, &net
->xdp
.list
) {
1055 struct xdp_sock
*xs
= xdp_sk(sk
);
1057 mutex_lock(&xs
->mutex
);
1058 if (xs
->dev
== dev
) {
1059 sk
->sk_err
= ENETDOWN
;
1060 if (!sock_flag(sk
, SOCK_DEAD
))
1061 sk
->sk_error_report(sk
);
1065 /* Clear device references in umem. */
1066 xdp_umem_clear_dev(xs
->umem
);
1068 mutex_unlock(&xs
->mutex
);
1070 mutex_unlock(&net
->xdp
.lock
);
1076 static struct proto xsk_proto
= {
1078 .owner
= THIS_MODULE
,
1079 .obj_size
= sizeof(struct xdp_sock
),
1082 static const struct proto_ops xsk_proto_ops
= {
1084 .owner
= THIS_MODULE
,
1085 .release
= xsk_release
,
1087 .connect
= sock_no_connect
,
1088 .socketpair
= sock_no_socketpair
,
1089 .accept
= sock_no_accept
,
1090 .getname
= sock_no_getname
,
1092 .ioctl
= sock_no_ioctl
,
1093 .listen
= sock_no_listen
,
1094 .shutdown
= sock_no_shutdown
,
1095 .setsockopt
= xsk_setsockopt
,
1096 .getsockopt
= xsk_getsockopt
,
1097 .sendmsg
= xsk_sendmsg
,
1098 .recvmsg
= sock_no_recvmsg
,
1100 .sendpage
= sock_no_sendpage
,
1103 static void xsk_destruct(struct sock
*sk
)
1105 struct xdp_sock
*xs
= xdp_sk(sk
);
1107 if (!sock_flag(sk
, SOCK_DEAD
))
1110 xdp_put_umem(xs
->umem
);
1112 sk_refcnt_debug_dec(sk
);
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);