// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/limits.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/capability.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <time.h>
#include <unistd.h>

#include <bpf/libbpf.h>
#include <bpf/xsk.h>
#include <bpf/bpf.h>
#include "xdpsock.h"

typedef __u64 u64;
typedef __u32 u32;
typedef __u16 u16;
typedef __u8 u8;

#define NUM_FRAMES (4 * 1024)
#define MIN_PKT_SIZE 64

#define DEBUG_HEXDUMP 0
static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY,
	BENCH_L2FWD,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static unsigned long opt_duration;
static unsigned long start_time;
static bool benchmark_done;
static u32 opt_batch_size = 64;
static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_extra_stats;
static bool opt_quiet;
static bool opt_app_stats;
static const char *opt_irq_str = "";
static u32 irq_no;
static int irqs_at_init = -1;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
static u32 prog_id;
static bool opt_busy_poll;
static bool opt_reduced_cap;
struct xsk_ring_stats {
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long rx_dropped_npkts;
	unsigned long rx_invalid_npkts;
	unsigned long tx_invalid_npkts;
	unsigned long rx_full_npkts;
	unsigned long rx_fill_empty_npkts;
	unsigned long tx_empty_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	unsigned long prev_rx_dropped_npkts;
	unsigned long prev_rx_invalid_npkts;
	unsigned long prev_tx_invalid_npkts;
	unsigned long prev_rx_full_npkts;
	unsigned long prev_rx_fill_empty_npkts;
	unsigned long prev_tx_empty_npkts;
};

struct xsk_driver_stats {
	unsigned long intrs;
	unsigned long prev_intrs;
};

struct xsk_app_stats {
	unsigned long rx_empty_polls;
	unsigned long fill_fail_polls;
	unsigned long copy_tx_sendtos;
	unsigned long tx_wakeup_sendtos;
	unsigned long opt_polls;
	unsigned long prev_rx_empty_polls;
	unsigned long prev_fill_fail_polls;
	unsigned long prev_copy_tx_sendtos;
	unsigned long prev_tx_wakeup_sendtos;
	unsigned long prev_opt_polls;
};

struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct xsk_ring_stats ring_stats;
	struct xsk_app_stats app_stats;
	struct xsk_driver_stats drv_stats;
	u32 outstanding_tx;
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];
int sock;
static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}
static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}
static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
{
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(struct xdp_statistics)) {
		xsk->ring_stats.rx_dropped_npkts = stats.rx_dropped;
		xsk->ring_stats.rx_invalid_npkts = stats.rx_invalid_descs;
		xsk->ring_stats.tx_invalid_npkts = stats.tx_invalid_descs;
		xsk->ring_stats.rx_full_npkts = stats.rx_ring_full;
		xsk->ring_stats.rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
		xsk->ring_stats.tx_empty_npkts = stats.tx_ring_empty_descs;
		return 0;
	}

	return -EINVAL;
}
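
/* Note: the XDP_STATISTICS counters read above are cumulative for the
 * lifetime of the socket, which is why every ring/app/driver counter is
 * paired with a prev_* snapshot - the dump functions below print
 * per-interval deltas and then refresh the snapshots.
 */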
static void dump_app_stats(long dt)
{
	int i;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double rx_empty_polls_ps, fill_fail_polls_ps, copy_tx_sendtos_ps,
				tx_wakeup_sendtos_ps, opt_polls_ps;

		rx_empty_polls_ps = (xsks[i]->app_stats.rx_empty_polls -
					xsks[i]->app_stats.prev_rx_empty_polls) * 1000000000. / dt;
		fill_fail_polls_ps = (xsks[i]->app_stats.fill_fail_polls -
					xsks[i]->app_stats.prev_fill_fail_polls) * 1000000000. / dt;
		copy_tx_sendtos_ps = (xsks[i]->app_stats.copy_tx_sendtos -
					xsks[i]->app_stats.prev_copy_tx_sendtos) * 1000000000. / dt;
		tx_wakeup_sendtos_ps = (xsks[i]->app_stats.tx_wakeup_sendtos -
					xsks[i]->app_stats.prev_tx_wakeup_sendtos) * 1000000000. / dt;
		opt_polls_ps = (xsks[i]->app_stats.opt_polls -
					xsks[i]->app_stats.prev_opt_polls) * 1000000000. / dt;

		printf("\n%-18s %-14s %-14s\n", "", "calls/s", "count");
		printf(fmt, "rx empty polls", rx_empty_polls_ps, xsks[i]->app_stats.rx_empty_polls);
		printf(fmt, "fill fail polls", fill_fail_polls_ps,
					xsks[i]->app_stats.fill_fail_polls);
		printf(fmt, "copy tx sendtos", copy_tx_sendtos_ps,
					xsks[i]->app_stats.copy_tx_sendtos);
		printf(fmt, "tx wakeup sendtos", tx_wakeup_sendtos_ps,
					xsks[i]->app_stats.tx_wakeup_sendtos);
		printf(fmt, "opt polls", opt_polls_ps, xsks[i]->app_stats.opt_polls);

		xsks[i]->app_stats.prev_rx_empty_polls = xsks[i]->app_stats.rx_empty_polls;
		xsks[i]->app_stats.prev_fill_fail_polls = xsks[i]->app_stats.fill_fail_polls;
		xsks[i]->app_stats.prev_copy_tx_sendtos = xsks[i]->app_stats.copy_tx_sendtos;
		xsks[i]->app_stats.prev_tx_wakeup_sendtos = xsks[i]->app_stats.tx_wakeup_sendtos;
		xsks[i]->app_stats.prev_opt_polls = xsks[i]->app_stats.opt_polls;
	}
}
static bool get_interrupt_number(void)
{
	FILE *f_int_proc;
	char line[4096];
	bool found = false;

	f_int_proc = fopen("/proc/interrupts", "r");
	if (f_int_proc == NULL) {
		printf("Failed to open /proc/interrupts.\n");
		return found;
	}

	while (!feof(f_int_proc) && !found) {
		/* Make sure to read a full line at a time */
		if (fgets(line, sizeof(line), f_int_proc) == NULL ||
		    line[strlen(line) - 1] != '\n') {
			printf("Error reading from interrupts file\n");
			break;
		}

		/* Extract interrupt number from line */
		if (strstr(line, opt_irq_str) != NULL) {
			irq_no = atoi(line);
			found = true;
		}
	}

	fclose(f_int_proc);
	return found;
}
static int get_irqs(void)
{
	char count_path[PATH_MAX];
	int total_intrs = -1;
	FILE *f_count_proc;
	char line[4096];

	snprintf(count_path, sizeof(count_path),
		 "/sys/kernel/irq/%i/per_cpu_count", irq_no);
	f_count_proc = fopen(count_path, "r");
	if (f_count_proc == NULL) {
		printf("Failed to open %s\n", count_path);
		return total_intrs;
	}

	if (fgets(line, sizeof(line), f_count_proc) == NULL ||
	    line[strlen(line) - 1] != '\n') {
		printf("Error reading from %s\n", count_path);
	} else {
		static const char com[2] = ",";
		char *token;

		total_intrs = 0;
		token = strtok(line, com);
		while (token != NULL) {
			/* sum up interrupts across all cores */
			total_intrs += atoi(token);
			token = strtok(NULL, com);
		}
	}

	fclose(f_count_proc);

	return total_intrs;
}
static void dump_driver_stats(long dt)
{
	int i;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double intrs_ps;
		int n_ints = get_irqs();

		if (n_ints < 0) {
			printf("error getting intr info for intr %i\n", irq_no);
			return;
		}
		xsks[i]->drv_stats.intrs = n_ints - irqs_at_init;

		intrs_ps = (xsks[i]->drv_stats.intrs - xsks[i]->drv_stats.prev_intrs) *
			 1000000000. / dt;

		printf("\n%-18s %-14s %-14s\n", "", "intrs/s", "count");
		printf(fmt, "irqs", intrs_ps, xsks[i]->drv_stats.intrs);

		xsks[i]->drv_stats.prev_intrs = xsks[i]->drv_stats.intrs;
	}
}
static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-18s %'-14.0f %'-14lu\n";
		double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
			tx_invalid_pps, tx_empty_pps;

		rx_pps = (xsks[i]->ring_stats.rx_npkts - xsks[i]->ring_stats.prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->ring_stats.tx_npkts - xsks[i]->ring_stats.prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-18s %-14s %-14s %-14.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->ring_stats.rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->ring_stats.tx_npkts);

		xsks[i]->ring_stats.prev_rx_npkts = xsks[i]->ring_stats.rx_npkts;
		xsks[i]->ring_stats.prev_tx_npkts = xsks[i]->ring_stats.tx_npkts;

		if (opt_extra_stats) {
			if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
				dropped_pps = (xsks[i]->ring_stats.rx_dropped_npkts -
						xsks[i]->ring_stats.prev_rx_dropped_npkts) *
							1000000000. / dt;
				rx_invalid_pps = (xsks[i]->ring_stats.rx_invalid_npkts -
						xsks[i]->ring_stats.prev_rx_invalid_npkts) *
							1000000000. / dt;
				tx_invalid_pps = (xsks[i]->ring_stats.tx_invalid_npkts -
						xsks[i]->ring_stats.prev_tx_invalid_npkts) *
							1000000000. / dt;
				full_pps = (xsks[i]->ring_stats.rx_full_npkts -
						xsks[i]->ring_stats.prev_rx_full_npkts) *
							1000000000. / dt;
				fill_empty_pps = (xsks[i]->ring_stats.rx_fill_empty_npkts -
						xsks[i]->ring_stats.prev_rx_fill_empty_npkts) *
							1000000000. / dt;
				tx_empty_pps = (xsks[i]->ring_stats.tx_empty_npkts -
						xsks[i]->ring_stats.prev_tx_empty_npkts) *
							1000000000. / dt;

				printf(fmt, "rx dropped", dropped_pps,
				       xsks[i]->ring_stats.rx_dropped_npkts);
				printf(fmt, "rx invalid", rx_invalid_pps,
				       xsks[i]->ring_stats.rx_invalid_npkts);
				printf(fmt, "tx invalid", tx_invalid_pps,
				       xsks[i]->ring_stats.tx_invalid_npkts);
				printf(fmt, "rx queue full", full_pps,
				       xsks[i]->ring_stats.rx_full_npkts);
				printf(fmt, "fill ring empty", fill_empty_pps,
				       xsks[i]->ring_stats.rx_fill_empty_npkts);
				printf(fmt, "tx ring empty", tx_empty_pps,
				       xsks[i]->ring_stats.tx_empty_npkts);

				xsks[i]->ring_stats.prev_rx_dropped_npkts =
					xsks[i]->ring_stats.rx_dropped_npkts;
				xsks[i]->ring_stats.prev_rx_invalid_npkts =
					xsks[i]->ring_stats.rx_invalid_npkts;
				xsks[i]->ring_stats.prev_tx_invalid_npkts =
					xsks[i]->ring_stats.tx_invalid_npkts;
				xsks[i]->ring_stats.prev_rx_full_npkts =
					xsks[i]->ring_stats.rx_full_npkts;
				xsks[i]->ring_stats.prev_rx_fill_empty_npkts =
					xsks[i]->ring_stats.rx_fill_empty_npkts;
				xsks[i]->ring_stats.prev_tx_empty_npkts =
					xsks[i]->ring_stats.tx_empty_npkts;
			} else {
				printf("%-15s\n", "Error retrieving extra stats");
			}
		}
	}

	if (opt_app_stats)
		dump_app_stats(dt);
	if (opt_irq_str[0] != 0)
		dump_driver_stats(dt);
}
static bool is_benchmark_done(void)
{
	if (opt_duration > 0) {
		unsigned long dt = (get_nsecs() - start_time);

		if (dt >= opt_duration)
			benchmark_done = true;
	}
	return benchmark_done;
}

static void *poller(void *arg)
{
	(void)arg;
	while (!is_benchmark_done()) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}
static void remove_xdp_program(void)
{
	u32 curr_prog_id = 0;
	int cmd = CLOSE_CONN;

	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
		printf("bpf_get_link_xdp_id failed\n");
		exit(EXIT_FAILURE);
	}

	if (prog_id == curr_prog_id)
		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on a given interface\n");
	else
		printf("program on interface changed, not removing\n");

	if (opt_reduced_cap) {
		if (write(sock, &cmd, sizeof(int)) < 0) {
			fprintf(stderr, "Error writing into stream socket: %s", strerror(errno));
			exit(EXIT_FAILURE);
		}
	}
}

static void int_exit(int sig)
{
	benchmark_done = true;
}
static void xdpsock_cleanup(void)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;
	int i;

	dump_stats();
	for (i = 0; i < num_socks; i++)
		xsk_socket__delete(xsks[i]->xsk);
	(void)xsk_umem__delete(umem);
	remove_xdp_program();
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));

	remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
						 __LINE__)
static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}
static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}
static void *memset32_htonl(void *dest, u32 val, u32 size)
{
	u32 *ptr = (u32 *)dest;
	int i;

	val = htonl(val);

	for (i = 0; i < (size & (~0x3)); i += 4)
		ptr[i >> 2] = val;

	for (; i < size; i++)
		((char *)dest)[i] = ((char *)&val)[i & 3];

	return dest;
}
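
/* Worked example: memset32_htonl(buf, 0x12345678, 8) stores the bytes
 * 12 34 56 78 12 34 56 78 - the fill pattern lands in the packet in
 * network byte order regardless of host endianness.
 */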
/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline unsigned short from32to16(unsigned int x)
{
	/* add up 16-bit and 16-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	unsigned int result = 0;
	int odd;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long)buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		if (2 & (unsigned long)buff) {
			result += *(unsigned short *)buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			const unsigned char *end = buff +
						   ((unsigned int)len & ~3);
			unsigned int carry = 0;

			do {
				unsigned int w = *(unsigned int *)buff;

				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *)buff;
			buff += 2;
		}
	}
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}

__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 *	This function code has been taken from
 *	Linux kernel lib/checksum.c
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__force __sum16)~do_csum(iph, ihl * 4);
}

/*
 * Fold a partial checksum
 * This function code has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum);

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}

/*
 * This function has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

static inline u16 udp_csum(u32 saddr, u32 daddr, u32 len,
			   u8 proto, u16 *udp_pkt)
{
	u32 csum = 0;
	u32 cnt = 0;

	/* udp hdr and data */
	for (; cnt < len; cnt += 2)
		csum += udp_pkt[cnt >> 1];

	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
}
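
/* Illustrative sketch (not compiled in): how the helpers above compose
 * when checksumming a UDP packet. The helper name example_udp_csum is
 * hypothetical; gen_eth_hdr_data() below does the same thing inline.
 */
#if 0
static u16 example_udp_csum(struct iphdr *ip, struct udphdr *udp, u32 udp_len)
{
	udp->check = 0;	/* checksum field must be zero while summing */
	return udp_csum(ip->saddr, ip->daddr, udp_len,
			IPPROTO_UDP, (u16 *)udp);
}
#endif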
#define ETH_FCS_SIZE 4

#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
		      sizeof(struct udphdr))

#define PKT_SIZE		(opt_pkt_size - ETH_FCS_SIZE)
#define IP_PKT_SIZE		(PKT_SIZE - sizeof(struct ethhdr))
#define UDP_PKT_SIZE		(IP_PKT_SIZE - sizeof(struct iphdr))
#define UDP_PKT_DATA_SIZE	(UDP_PKT_SIZE - sizeof(struct udphdr))
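
/* Worked example with the default --tx-pkt-size of 64 (the minimum
 * Ethernet frame): PKT_SIZE = 64 - 4 = 60 bytes excluding the FCS,
 * IP_PKT_SIZE = 60 - 14 = 46, UDP_PKT_SIZE = 46 - 20 = 26, and
 * UDP_PKT_DATA_SIZE = 26 - 8 = 18 payload bytes of fill pattern.
 */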
static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];

static void gen_eth_hdr_data(void)
{
	struct udphdr *udp_hdr = (struct udphdr *)(pkt_data +
						   sizeof(struct ethhdr) +
						   sizeof(struct iphdr));
	struct iphdr *ip_hdr = (struct iphdr *)(pkt_data +
						sizeof(struct ethhdr));
	struct ethhdr *eth_hdr = (struct ethhdr *)pkt_data;

	/* ethernet header */
	memcpy(eth_hdr->h_dest, "\x3c\xfd\xfe\x9e\x7f\x71", ETH_ALEN);
	memcpy(eth_hdr->h_source, "\xec\xb1\xd7\x98\x3a\xc0", ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_IP);

	/* IP header */
	ip_hdr->version = IPVERSION;
	ip_hdr->ihl = 0x5; /* 20 byte header */
	ip_hdr->tot_len = htons(IP_PKT_SIZE);
	ip_hdr->frag_off = 0;
	ip_hdr->ttl = IPDEFTTL;
	ip_hdr->protocol = IPPROTO_UDP;
	ip_hdr->saddr = htonl(0x0a0a0a10);
	ip_hdr->daddr = htonl(0x0a0a0a20);

	/* IP header checksum */
	ip_hdr->check = 0;
	ip_hdr->check = ip_fast_csum((const void *)ip_hdr, ip_hdr->ihl);

	/* UDP header */
	udp_hdr->source = htons(0x1000);
	udp_hdr->dest = htons(0x1000);
	udp_hdr->len = htons(UDP_PKT_SIZE);

	/* UDP data */
	memset32_htonl(pkt_data + PKT_HDR_SIZE, opt_pkt_fill_pattern,
		       UDP_PKT_DATA_SIZE);

	/* UDP header checksum */
	udp_hdr->check = 0;
	udp_hdr->check = udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE,
				  IPPROTO_UDP, (u16 *)udp_hdr);
}
*umem
, u64 addr
)
800 memcpy(xsk_umem__get_data(umem
->buffer
, addr
), pkt_data
,
804 static struct xsk_umem_info
*xsk_configure_umem(void *buffer
, u64 size
)
806 struct xsk_umem_info
*umem
;
807 struct xsk_umem_config cfg
= {
808 /* We recommend that you set the fill ring size >= HW RX ring size +
809 * AF_XDP RX ring size. Make sure you fill up the fill ring
810 * with buffers at regular intervals, and you will with this setting
811 * avoid allocation failures in the driver. These are usually quite
812 * expensive since drivers have not been written to assume that
813 * allocation failures are common. For regular sockets, kernel
814 * allocated memory is used that only runs out in OOM situations
815 * that should be rare.
817 .fill_size
= XSK_RING_PROD__DEFAULT_NUM_DESCS
* 2,
818 .comp_size
= XSK_RING_CONS__DEFAULT_NUM_DESCS
,
819 .frame_size
= opt_xsk_frame_size
,
820 .frame_headroom
= XSK_UMEM__DEFAULT_FRAME_HEADROOM
,
821 .flags
= opt_umem_flags
825 umem
= calloc(1, sizeof(*umem
));
827 exit_with_error(errno
);
829 ret
= xsk_umem__create(&umem
->umem
, buffer
, size
, &umem
->fq
, &umem
->cq
,
832 exit_with_error(-ret
);
834 umem
->buffer
= buffer
;
static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
{
	int ret, i;
	u32 idx;

	ret = xsk_ring_prod__reserve(&umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS * 2, &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
		exit_with_error(-ret);
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
		*xsk_ring_prod__fill_addr(&umem->fq, idx++) =
			i * opt_xsk_frame_size;
	xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
}
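
/* The reserve/write/submit sequence above is the standard libbpf
 * producer idiom for AF_XDP rings: xsk_ring_prod__reserve() claims
 * descriptor slots, xsk_ring_prod__fill_addr() (or __tx_desc()) writes
 * them, and xsk_ring_prod__submit() publishes them to the kernel. The
 * same pattern recurs for the Tx ring in tx_only() and l2fwd() below.
 */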
static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
						    bool rx, bool tx)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;
	int ret;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	if (opt_num_xsks > 1 || opt_reduced_cap)
		cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	else
		cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;

	rxr = rx ? &xsk->rx : NULL;
	txr = tx ? &xsk->tx : NULL;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 rxr, txr, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
	if (ret)
		exit_with_error(-ret);

	xsk->app_stats.rx_empty_polls = 0;
	xsk->app_stats.fill_fail_polls = 0;
	xsk->app_stats.copy_tx_sendtos = 0;
	xsk->app_stats.tx_wakeup_sendtos = 0;
	xsk->app_stats.opt_polls = 0;
	xsk->app_stats.prev_rx_empty_polls = 0;
	xsk->app_stats.prev_fill_fail_polls = 0;
	xsk->app_stats.prev_copy_tx_sendtos = 0;
	xsk->app_stats.prev_tx_wakeup_sendtos = 0;
	xsk->app_stats.prev_opt_polls = 0;

	return xsk;
}
static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"frame-size", required_argument, 0, 'f'},
	{"no-need-wakeup", no_argument, 0, 'm'},
	{"unaligned", no_argument, 0, 'u'},
	{"shared-umem", no_argument, 0, 'M'},
	{"force", no_argument, 0, 'F'},
	{"duration", required_argument, 0, 'd'},
	{"batch-size", required_argument, 0, 'b'},
	{"tx-pkt-count", required_argument, 0, 'C'},
	{"tx-pkt-size", required_argument, 0, 's'},
	{"tx-pkt-pattern", required_argument, 0, 'P'},
	{"extra-stats", no_argument, 0, 'x'},
	{"quiet", no_argument, 0, 'Q'},
	{"app-stats", no_argument, 0, 'a'},
	{"irq-string", no_argument, 0, 'I'},
	{"busy-poll", no_argument, 0, 'B'},
	{"reduce-cap", no_argument, 0, 'R'},
	{0, 0, 0, 0}
};
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb=n	Use XDP skb-mod\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"  -m, --no-need-wakeup	Turn off use of driver need wakeup flag.\n"
		"  -f, --frame-size=n	Set the frame size (must be a power of two in aligned mode, default is %d).\n"
		"  -u, --unaligned	Enable unaligned chunk placement\n"
		"  -M, --shared-umem	Enable XDP_SHARED_UMEM (cannot be used with -R)\n"
		"  -F, --force		Force loading the XDP prog\n"
		"  -d, --duration=n	Duration in secs to run command.\n"
		"			Default: forever.\n"
		"  -b, --batch-size=n	Batch size for sending or receiving\n"
		"			packets. Default: %d\n"
		"  -C, --tx-pkt-count=n	Number of packets to send.\n"
		"			Default: Continuous packets.\n"
		"  -s, --tx-pkt-size=n	Transmit packet size.\n"
		"			(Default: %d bytes)\n"
		"			Min size: %d, Max size %d.\n"
		"  -P, --tx-pkt-pattern=n	Packet fill pattern. Default: 0x%x\n"
		"  -x, --extra-stats	Display extra statistics.\n"
		"  -Q, --quiet		Do not display any stats.\n"
		"  -a, --app-stats	Display application (syscall) statistics.\n"
		"  -I, --irq-string	Display driver interrupt statistics for interface associated with irq-string.\n"
		"  -B, --busy-poll	Busy poll.\n"
		"  -R, --reduce-cap	Use reduced capabilities (cannot be used with -M)\n"
		"\n";
	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
		opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
		XSK_UMEM__DEFAULT_FRAME_SIZE, opt_pkt_fill_pattern);

	exit(EXIT_FAILURE);
}
static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:xQaI:BR",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			/* default, set below */
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'u':
			opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
			opt_unaligned_chunks = 1;
			opt_mmap_flags = MAP_HUGETLB;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'f':
			opt_xsk_frame_size = atoi(optarg);
			break;
		case 'm':
			opt_need_wakeup = false;
			opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
			break;
		case 'M':
			opt_num_xsks = MAX_SOCKS;
			break;
		case 'd':
			opt_duration = atoi(optarg);
			opt_duration *= 1000000000;
			break;
		case 'b':
			opt_batch_size = atoi(optarg);
			break;
		case 'C':
			opt_pkt_count = atoi(optarg);
			break;
		case 's':
			opt_pkt_size = atoi(optarg);
			if (opt_pkt_size > (XSK_UMEM__DEFAULT_FRAME_SIZE) ||
			    opt_pkt_size < MIN_PKT_SIZE) {
				fprintf(stderr,
					"ERROR: Invalid frame size %d\n",
					opt_pkt_size);
				usage(basename(argv[0]));
			}
			break;
		case 'P':
			opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
			break;
		case 'x':
			opt_extra_stats = 1;
			break;
		case 'Q':
			opt_quiet = 1;
			break;
		case 'a':
			opt_app_stats = 1;
			break;
		case 'I':
			opt_irq_str = optarg;
			if (get_interrupt_number())
				irqs_at_init = get_irqs();
			if (irqs_at_init < 0) {
				fprintf(stderr, "ERROR: Failed to get irqs for %s\n", opt_irq_str);
				usage(basename(argv[0]));
			}
			break;
		case 'B':
			opt_busy_poll = 1;
			break;
		case 'R':
			opt_reduced_cap = true;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	if (!(opt_xdp_flags & XDP_FLAGS_SKB_MODE))
		opt_xdp_flags |= XDP_FLAGS_DRV_MODE;

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}

	if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) &&
	    !opt_unaligned_chunks) {
		fprintf(stderr, "--frame-size=%d is not a power of two\n",
			opt_xsk_frame_size);
		usage(basename(argv[0]));
	}

	if (opt_reduced_cap && opt_num_xsks > 1) {
		fprintf(stderr, "ERROR: -M and -R cannot be used together\n");
		usage(basename(argv[0]));
	}
}
static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
	    errno == EBUSY || errno == ENETDOWN)
		return;
	exit_with_error(errno);
}
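
/* ENOBUFS, EAGAIN, EBUSY and ENETDOWN are treated as transient here:
 * the wakeup simply could not be serviced right now (or the link is
 * down), so the next kick will retry rather than abort the benchmark.
 */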
static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
{
	struct xsk_umem_info *umem = xsk->umem;
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	/* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
	 * really send the packets. In zero-copy mode we do not have to do this, since Tx
	 * is driven by the NAPI loop. So as an optimization, we do not have to call
	 * sendto() all the time in zero-copy mode for l2fwd.
	 */
	if (opt_xdp_bind_flags & XDP_COPY) {
		xsk->app_stats.copy_tx_sendtos++;
		kick_tx(xsk);
	}

	ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
		xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&umem->fq)) {
				xsk->app_stats.fill_fail_polls++;
				recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL,
					 NULL);
			}
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}

		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
}
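
/* Note the closed buffer loop above: every address harvested from the
 * completion ring is written straight back into the fill ring, so in
 * l2fwd the same umem frames circulate Rx -> Tx -> completion -> fill
 * indefinitely.
 */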
static inline void complete_tx_only(struct xsk_socket_info *xsk,
				    int batch_size)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
		xsk->app_stats.tx_wakeup_sendtos++;
		kick_tx(xsk);
	}

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
}
static void rx_drop(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.rx_empty_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.fill_fail_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = xsk_umem__extract_addr(addr);
		char *pkt;

		addr = xsk_umem__add_offset_to_addr(addr);
		pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->ring_stats.rx_npkts += rcvd;
}
static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			for (i = 0; i < num_socks; i++)
				xsks[i]->app_stats.opt_polls++;
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);

		if (benchmark_done)
			break;
	}
}
static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
{
	u32 idx;
	unsigned int i;

	while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
				      batch_size) {
		complete_tx_only(xsk, batch_size);
		if (benchmark_done)
			return;
	}

	for (i = 0; i < batch_size; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
								  idx + i);
		tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
		tx_desc->len = PKT_SIZE;
	}

	xsk_ring_prod__submit(&xsk->tx, batch_size);
	xsk->ring_stats.tx_npkts += batch_size;
	xsk->outstanding_tx += batch_size;
	*frame_nb += batch_size;
	*frame_nb %= NUM_FRAMES;
	complete_tx_only(xsk, batch_size);
}
static inline int get_batch_size(int pkt_cnt)
{
	if (!opt_pkt_count)
		return opt_batch_size;

	if (pkt_cnt + opt_batch_size <= opt_pkt_count)
		return opt_batch_size;

	return opt_pkt_count - pkt_cnt;
}
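
/* Example: with --tx-pkt-count=1000 and the default batch size of 64,
 * the first 15 calls return 64 (960 packets) and the final call returns
 * 1000 - 960 = 40, so exactly 1000 packets are sent.
 */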
static void complete_tx_only_all(void)
{
	bool pending;
	int i;

	do {
		pending = false;
		for (i = 0; i < num_socks; i++) {
			if (xsks[i]->outstanding_tx) {
				complete_tx_only(xsks[i], opt_batch_size);
				pending = !!xsks[i]->outstanding_tx;
			}
		}
	} while (pending);
}
static void tx_only_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	u32 frame_nb[MAX_SOCKS] = {};
	int pkt_cnt = 0;
	int i, ret;

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT;
	}

	while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
		int batch_size = get_batch_size(pkt_cnt);

		if (opt_poll) {
			for (i = 0; i < num_socks; i++)
				xsks[i]->app_stats.opt_polls++;
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;

			if (!(fds[0].revents & POLLOUT))
				continue;
		}

		for (i = 0; i < num_socks; i++)
			tx_only(xsks[i], &frame_nb[i], batch_size);

		pkt_cnt += batch_size;

		if (benchmark_done)
			break;
	}

	if (opt_pkt_count)
		complete_tx_only_all();
}
static void l2fwd(struct xsk_socket_info *xsk)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_tx = 0;
	int ret;

	complete_tx_l2fwd(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd) {
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
			xsk->app_stats.rx_empty_polls++;
			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
		}
		return;
	}
	xsk->ring_stats.rx_npkts += rcvd;

	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		complete_tx_l2fwd(xsk);
		if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
			xsk->app_stats.tx_wakeup_sendtos++;
			kick_tx(xsk);
		}
		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = addr;
		char *pkt;

		addr = xsk_umem__add_offset_to_addr(addr);
		pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		swap_mac_addresses(pkt);

		hex_dump(pkt, len, addr);
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
	}

	xsk_ring_prod__submit(&xsk->tx, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);

	xsk->ring_stats.tx_npkts += rcvd;
	xsk->outstanding_tx += rcvd;
}
static void l2fwd_all(void)
{
	struct pollfd fds[MAX_SOCKS] = {};
	int i, ret;

	for (;;) {
		if (opt_poll) {
			for (i = 0; i < num_socks; i++) {
				fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
				fds[i].events = POLLOUT | POLLIN;
				xsks[i]->app_stats.opt_polls++;
			}
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			l2fwd(xsks[i]);

		if (benchmark_done)
			break;
	}
}
static void load_xdp_program(char **argv, struct bpf_object **obj)
{
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	char xdp_filename[256];
	int prog_fd;

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}
}
static void enter_xsks_into_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int i, xsks_map;

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < num_socks; i++) {
		int fd = xsk_socket__fd(xsks[i]->xsk);
		int key, ret;

		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &fd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}
}
static void apply_setsockopt(struct xsk_socket_info *xsk)
{
	int sock_opt;

	if (!opt_busy_poll)
		return;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = opt_batch_size;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);
}
static int recv_xsks_map_fd_from_ctrl_node(int sock, int *_fd)
{
	char cms[CMSG_SPACE(sizeof(int))];
	struct cmsghdr *cmsg;
	struct msghdr msg;
	struct iovec iov;
	int value;
	int len;

	iov.iov_base = &value;
	iov.iov_len = sizeof(int);

	msg.msg_name = 0;
	msg.msg_namelen = 0;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_flags = 0;
	msg.msg_control = (caddr_t)cms;
	msg.msg_controllen = sizeof(cms);

	len = recvmsg(sock, &msg, 0);

	if (len < 0) {
		fprintf(stderr, "Recvmsg failed length incorrect.\n");
		return -EINVAL;
	}

	if (len == 0) {
		fprintf(stderr, "Recvmsg failed no data\n");
		return -EINVAL;
	}

	cmsg = CMSG_FIRSTHDR(&msg);
	*_fd = *(int *)CMSG_DATA(cmsg);

	return 0;
}
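
/* Standard SCM_RIGHTS fd passing: the privileged control process sends
 * the xsks map fd as ancillary data over a Unix stream socket, so this
 * process can update the XSKMAP in the --reduce-cap flow without
 * loading the XDP program itself.
 */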
static int
recv_xsks_map_fd(int *xsks_map_fd)
{
	struct sockaddr_un server;
	int err;

	sock = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sock < 0) {
		fprintf(stderr, "Error opening socket stream: %s", strerror(errno));
		return errno;
	}

	server.sun_family = AF_UNIX;
	strcpy(server.sun_path, SOCKET_NAME);

	if (connect(sock, (struct sockaddr *)&server, sizeof(struct sockaddr_un)) < 0) {
		close(sock);
		fprintf(stderr, "Error connecting stream socket: %s", strerror(errno));
		return errno;
	}

	err = recv_xsks_map_fd_from_ctrl_node(sock, xsks_map_fd);
	if (err) {
		fprintf(stderr, "Error %d receiving fd\n", err);
		return err;
	}
	return 0;
}
, char **argv
)
1583 struct __user_cap_header_struct hdr
= { _LINUX_CAPABILITY_VERSION_3
, 0 };
1584 struct __user_cap_data_struct data
[2] = { { 0 } };
1585 struct rlimit r
= {RLIM_INFINITY
, RLIM_INFINITY
};
1586 bool rx
= false, tx
= false;
1587 struct xsk_umem_info
*umem
;
1588 struct bpf_object
*obj
;
1589 int xsks_map_fd
= 0;
1594 parse_command_line(argc
, argv
);
1596 if (opt_reduced_cap
) {
1597 if (capget(&hdr
, data
) < 0)
1598 fprintf(stderr
, "Error getting capabilities\n");
1600 data
->effective
&= CAP_TO_MASK(CAP_NET_RAW
);
1601 data
->permitted
&= CAP_TO_MASK(CAP_NET_RAW
);
1603 if (capset(&hdr
, data
) < 0)
1604 fprintf(stderr
, "Setting capabilities failed\n");
1606 if (capget(&hdr
, data
) < 0) {
1607 fprintf(stderr
, "Error getting capabilities\n");
1609 fprintf(stderr
, "Capabilities EFF %x Caps INH %x Caps Per %x\n",
1610 data
[0].effective
, data
[0].inheritable
, data
[0].permitted
);
1611 fprintf(stderr
, "Capabilities EFF %x Caps INH %x Caps Per %x\n",
1612 data
[1].effective
, data
[1].inheritable
, data
[1].permitted
);
1615 if (setrlimit(RLIMIT_MEMLOCK
, &r
)) {
1616 fprintf(stderr
, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
1621 if (opt_num_xsks
> 1)
1622 load_xdp_program(argv
, &obj
);
1625 /* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
1626 bufs
= mmap(NULL
, NUM_FRAMES
* opt_xsk_frame_size
,
1627 PROT_READ
| PROT_WRITE
,
1628 MAP_PRIVATE
| MAP_ANONYMOUS
| opt_mmap_flags
, -1, 0);
1629 if (bufs
== MAP_FAILED
) {
1630 printf("ERROR: mmap failed\n");
1634 /* Create sockets... */
1635 umem
= xsk_configure_umem(bufs
, NUM_FRAMES
* opt_xsk_frame_size
);
1636 if (opt_bench
== BENCH_RXDROP
|| opt_bench
== BENCH_L2FWD
) {
1638 xsk_populate_fill_ring(umem
);
1640 if (opt_bench
== BENCH_L2FWD
|| opt_bench
== BENCH_TXONLY
)
1642 for (i
= 0; i
< opt_num_xsks
; i
++)
1643 xsks
[num_socks
++] = xsk_configure_socket(umem
, rx
, tx
);
1645 for (i
= 0; i
< opt_num_xsks
; i
++)
1646 apply_setsockopt(xsks
[i
]);
1648 if (opt_bench
== BENCH_TXONLY
) {
1651 for (i
= 0; i
< NUM_FRAMES
; i
++)
1652 gen_eth_frame(umem
, i
* opt_xsk_frame_size
);
1655 if (opt_num_xsks
> 1 && opt_bench
!= BENCH_TXONLY
)
1656 enter_xsks_into_map(obj
);
1658 if (opt_reduced_cap
) {
1659 ret
= recv_xsks_map_fd(&xsks_map_fd
);
1661 fprintf(stderr
, "Error %d receiving xsks_map_fd\n", ret
);
1662 exit_with_error(ret
);
1665 ret
= xsk_socket__update_xskmap(xsks
[0]->xsk
, xsks_map_fd
);
1667 fprintf(stderr
, "Update of BPF map failed(%d)\n", ret
);
1668 exit_with_error(ret
);
1673 signal(SIGINT
, int_exit
);
1674 signal(SIGTERM
, int_exit
);
1675 signal(SIGABRT
, int_exit
);
1677 setlocale(LC_ALL
, "");
1680 ret
= pthread_create(&pt
, NULL
, poller
, NULL
);
1682 exit_with_error(ret
);
1685 prev_time
= get_nsecs();
1686 start_time
= prev_time
;
1688 if (opt_bench
== BENCH_RXDROP
)
1690 else if (opt_bench
== BENCH_TXONLY
)
1695 benchmark_done
= true;
1698 pthread_join(pt
, NULL
);