// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf/bpf.h"
#include "bpf/libbpf.h"
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

#define MAX_SOCKS 4

#define RR_LB 0 /* Round-Robin Load-Balancing */
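
/*
 * The UMEM is NUM_FRAMES frames of FRAME_SIZE bytes each. FRAME_SIZE is
 * 1 << FRAME_SHIFT, so a frame index can be turned into a byte offset
 * into the buffer with "idx << FRAME_SHIFT" (see xq_enq_tx_only()).
 */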
typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY,
	BENCH_L2FWD,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;
struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];
static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}
static void dump_stats(void);
#define lassert(expr)						\
	do {							\
		if (!(expr)) {					\
			fprintf(stderr, "%s:%s:%i: Assertion failed: " \
				#expr ": errno: %d/\"%s\"\n",	\
				__FILE__, __func__, __LINE__,	\
				errno, strerror(errno));	\
			exit(EXIT_FAILURE);			\
		}						\
	} while (0)
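
/*
 * The rings are shared with the kernel, which may run the queue on
 * another CPU, so accesses need explicit ordering: u_smp_rmb() before
 * consuming descriptors the kernel produced, u_smp_wmb() before
 * publishing an updated producer/consumer index. On aarch64 this needs
 * real dmb instructions; elsewhere a compiler barrier is used.
 */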
#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}
static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;
	return q->cached_cons - q->cached_prod;
}
static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}
static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}
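
/*
 * Fill-ring producers: return frame addresses to the kernel so it has
 * buffers to receive into. The _ex variant recycles the addr field of
 * full xdp_desc entries (as returned by xq_deq()); the plain variant
 * takes raw u64 addresses.
 */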
static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}
static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}
static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}
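
/* A descriptor's addr is a byte offset into the umem frame buffer. */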
static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	return &xsk->umem->frames[addr];
}
static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}
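
/*
 * Tx-only fast path: every descriptor points at the pre-generated
 * template packet, so only addr (frame index << FRAME_SHIFT) and the
 * constant length are written.
 */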
static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = (id + i) << FRAME_SHIFT;
		r[idx].len = sizeof(pkt_data) - 1;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}
static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}
static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}
static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}
static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}
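
/*
 * UMEM setup: register the frame buffer with XDP_UMEM_REG, size the
 * fill and completion rings via setsockopt(), then mmap() both rings at
 * the offsets reported by XDP_MMAP_OFFSETS so they are shared directly
 * with the kernel.
 */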
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	umem->fq.cached_cons = FQ_NUM_DESCS;

	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}
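
/*
 * Per-socket setup: create the PF_XDP socket, attach (or create) the
 * umem, mmap() the Rx/Tx descriptor rings, and bind to the chosen
 * interface/queue. A socket sharing another socket's umem binds with
 * XDP_SHARED_UMEM instead.
 */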
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;

	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = opt_xdp_bind_flags;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}
static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_shared_packet_buffer)
		printf("shared-buf ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}
static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}
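
/* Statistics thread: report throughput every opt_interval seconds. */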
static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}
static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}
static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{0, 0, 0, 0}
};
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
		"  -S, --xdp-skb	Use XDP skb mode\n"
		"  -N, --xdp-native	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}
static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}
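
/*
 * A zero-length sendto() kicks the kernel's Tx processing. ENOBUFS,
 * EAGAIN and EBUSY just mean the kernel is busy; the kick is simply
 * retried on a later pass.
 */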
static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	lassert(0);
}
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}
static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}
static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		char *pkt = xq_get_data(xsk, descs[i].addr);

		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}
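
/*
 * rxdrop main loop: optionally poll() all sockets, then drain each Rx
 * ring and recycle the frames straight back onto the fill ring.
 */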
static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout, nfds = 1;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
		timeout = 1000; /* 1 second */
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}
static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}
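
/*
 * main() loads the companion %s_kern.o XDP program, attaches it to the
 * interface, stores the socket fds in the xsks map so the XDP program
 * can XDP_REDIRECT into them, and then runs the selected benchmark.
 */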
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	int prog_fd, qidconf_map, xsks_map;
	struct bpf_object *obj;
	char xdp_filename[256];
	struct bpf_map *map;
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "qidconf_map");
	qidconf_map = bpf_map__fd(map);
	if (qidconf_map < 0) {
		fprintf(stderr, "ERROR: no qidconf map found: %s\n",
			strerror(qidconf_map));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	fprintf(stderr, "Starting round-robin load-balancing\n");

	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}