// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if.h>
#include <linux/if_tun.h>
/* Compare a dissected struct bpf_flow_keys against the expected value and,
 * on mismatch, dump every field as "got/expected" pairs under the test name
 * 'desc'. Relies on the CHECK_ATTR() helper from test_progs.h.
 *
 * NOTE(review): the extraction dropped the 'desc' line and several of the
 * format-specifier lines; they are reconstructed one-for-one from the visible
 * got.X/expected.X argument list below — verify against the original file.
 */
#define CHECK_FLOW_KEYS(desc, got, expected)				\
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
		   desc,						\
		   "nhoff=%u/%u "					\
		   "thoff=%u/%u "					\
		   "addr_proto=0x%x/0x%x "				\
		   "is_frag=%u/%u "					\
		   "is_first_frag=%u/%u "				\
		   "is_encap=%u/%u "					\
		   "ip_proto=0x%x/0x%x "				\
		   "n_proto=0x%x/0x%x "				\
		   "flow_label=0x%x/0x%x "				\
		   "sport=%u/%u "					\
		   "dport=%u/%u\n",					\
		   got.nhoff, expected.nhoff,				\
		   got.thoff, expected.thoff,				\
		   got.addr_proto, expected.addr_proto,			\
		   got.is_frag, expected.is_frag,			\
		   got.is_first_frag, expected.is_first_frag,		\
		   got.is_encap, expected.is_encap,			\
		   got.ip_proto, expected.ip_proto,			\
		   got.n_proto, expected.n_proto,			\
		   got.flow_label, expected.flow_label,			\
		   got.sport, expected.sport,				\
		   got.dport, expected.dport)
47 struct iphdr iph_inner
;
51 struct svlan_ipv4_pkt
{
65 struct ipv6_frag_pkt
{
72 __be32 identification
;
77 struct dvlan_ipv6_pkt
{
91 struct svlan_ipv4_pkt svlan_ipv4
;
94 struct ipv6_frag_pkt ipv6_frag
;
95 struct dvlan_ipv6_pkt dvlan_ipv6
;
97 struct bpf_flow_keys keys
;
103 struct test tests
[] = {
107 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IP
),
109 .iph
.protocol
= IPPROTO_TCP
,
110 .iph
.tot_len
= __bpf_constant_htons(MAGIC_BYTES
),
117 .thoff
= ETH_HLEN
+ sizeof(struct iphdr
),
118 .addr_proto
= ETH_P_IP
,
119 .ip_proto
= IPPROTO_TCP
,
120 .n_proto
= __bpf_constant_htons(ETH_P_IP
),
128 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IPV6
),
129 .iph
.nexthdr
= IPPROTO_TCP
,
130 .iph
.payload_len
= __bpf_constant_htons(MAGIC_BYTES
),
137 .thoff
= ETH_HLEN
+ sizeof(struct ipv6hdr
),
138 .addr_proto
= ETH_P_IPV6
,
139 .ip_proto
= IPPROTO_TCP
,
140 .n_proto
= __bpf_constant_htons(ETH_P_IPV6
),
146 .name
= "802.1q-ipv4",
148 .eth
.h_proto
= __bpf_constant_htons(ETH_P_8021Q
),
149 .vlan_proto
= __bpf_constant_htons(ETH_P_IP
),
151 .iph
.protocol
= IPPROTO_TCP
,
152 .iph
.tot_len
= __bpf_constant_htons(MAGIC_BYTES
),
158 .nhoff
= ETH_HLEN
+ VLAN_HLEN
,
159 .thoff
= ETH_HLEN
+ VLAN_HLEN
+ sizeof(struct iphdr
),
160 .addr_proto
= ETH_P_IP
,
161 .ip_proto
= IPPROTO_TCP
,
162 .n_proto
= __bpf_constant_htons(ETH_P_IP
),
168 .name
= "802.1ad-ipv6",
170 .eth
.h_proto
= __bpf_constant_htons(ETH_P_8021AD
),
171 .vlan_proto
= __bpf_constant_htons(ETH_P_8021Q
),
172 .vlan_proto2
= __bpf_constant_htons(ETH_P_IPV6
),
173 .iph
.nexthdr
= IPPROTO_TCP
,
174 .iph
.payload_len
= __bpf_constant_htons(MAGIC_BYTES
),
180 .nhoff
= ETH_HLEN
+ VLAN_HLEN
* 2,
181 .thoff
= ETH_HLEN
+ VLAN_HLEN
* 2 +
182 sizeof(struct ipv6hdr
),
183 .addr_proto
= ETH_P_IPV6
,
184 .ip_proto
= IPPROTO_TCP
,
185 .n_proto
= __bpf_constant_htons(ETH_P_IPV6
),
193 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IP
),
195 .iph
.protocol
= IPPROTO_TCP
,
196 .iph
.tot_len
= __bpf_constant_htons(MAGIC_BYTES
),
197 .iph
.frag_off
= __bpf_constant_htons(IP_MF
),
203 .flags
= BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG
,
205 .thoff
= ETH_HLEN
+ sizeof(struct iphdr
),
206 .addr_proto
= ETH_P_IP
,
207 .ip_proto
= IPPROTO_TCP
,
208 .n_proto
= __bpf_constant_htons(ETH_P_IP
),
210 .is_first_frag
= true,
214 .flags
= BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG
,
217 .name
= "ipv4-no-frag",
219 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IP
),
221 .iph
.protocol
= IPPROTO_TCP
,
222 .iph
.tot_len
= __bpf_constant_htons(MAGIC_BYTES
),
223 .iph
.frag_off
= __bpf_constant_htons(IP_MF
),
230 .thoff
= ETH_HLEN
+ sizeof(struct iphdr
),
231 .addr_proto
= ETH_P_IP
,
232 .ip_proto
= IPPROTO_TCP
,
233 .n_proto
= __bpf_constant_htons(ETH_P_IP
),
235 .is_first_frag
= true,
241 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IPV6
),
242 .iph
.nexthdr
= IPPROTO_FRAGMENT
,
243 .iph
.payload_len
= __bpf_constant_htons(MAGIC_BYTES
),
244 .ipf
.nexthdr
= IPPROTO_TCP
,
250 .flags
= BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG
,
252 .thoff
= ETH_HLEN
+ sizeof(struct ipv6hdr
) +
253 sizeof(struct frag_hdr
),
254 .addr_proto
= ETH_P_IPV6
,
255 .ip_proto
= IPPROTO_TCP
,
256 .n_proto
= __bpf_constant_htons(ETH_P_IPV6
),
258 .is_first_frag
= true,
262 .flags
= BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG
,
265 .name
= "ipv6-no-frag",
267 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IPV6
),
268 .iph
.nexthdr
= IPPROTO_FRAGMENT
,
269 .iph
.payload_len
= __bpf_constant_htons(MAGIC_BYTES
),
270 .ipf
.nexthdr
= IPPROTO_TCP
,
277 .thoff
= ETH_HLEN
+ sizeof(struct ipv6hdr
) +
278 sizeof(struct frag_hdr
),
279 .addr_proto
= ETH_P_IPV6
,
280 .ip_proto
= IPPROTO_TCP
,
281 .n_proto
= __bpf_constant_htons(ETH_P_IPV6
),
283 .is_first_frag
= true,
287 .name
= "ipv6-flow-label",
289 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IPV6
),
290 .iph
.nexthdr
= IPPROTO_TCP
,
291 .iph
.payload_len
= __bpf_constant_htons(MAGIC_BYTES
),
292 .iph
.flow_lbl
= { 0xb, 0xee, 0xef },
299 .thoff
= ETH_HLEN
+ sizeof(struct ipv6hdr
),
300 .addr_proto
= ETH_P_IPV6
,
301 .ip_proto
= IPPROTO_TCP
,
302 .n_proto
= __bpf_constant_htons(ETH_P_IPV6
),
305 .flow_label
= __bpf_constant_htonl(0xbeeef),
309 .name
= "ipv6-no-flow-label",
311 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IPV6
),
312 .iph
.nexthdr
= IPPROTO_TCP
,
313 .iph
.payload_len
= __bpf_constant_htons(MAGIC_BYTES
),
314 .iph
.flow_lbl
= { 0xb, 0xee, 0xef },
320 .flags
= BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL
,
322 .thoff
= ETH_HLEN
+ sizeof(struct ipv6hdr
),
323 .addr_proto
= ETH_P_IPV6
,
324 .ip_proto
= IPPROTO_TCP
,
325 .n_proto
= __bpf_constant_htons(ETH_P_IPV6
),
326 .flow_label
= __bpf_constant_htonl(0xbeeef),
328 .flags
= BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL
,
331 .name
= "ipip-encap",
333 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IP
),
335 .iph
.protocol
= IPPROTO_IPIP
,
336 .iph
.tot_len
= __bpf_constant_htons(MAGIC_BYTES
),
338 .iph_inner
.protocol
= IPPROTO_TCP
,
340 __bpf_constant_htons(MAGIC_BYTES
) -
341 sizeof(struct iphdr
),
348 .thoff
= ETH_HLEN
+ sizeof(struct iphdr
) +
349 sizeof(struct iphdr
),
350 .addr_proto
= ETH_P_IP
,
351 .ip_proto
= IPPROTO_TCP
,
352 .n_proto
= __bpf_constant_htons(ETH_P_IP
),
359 .name
= "ipip-no-encap",
361 .eth
.h_proto
= __bpf_constant_htons(ETH_P_IP
),
363 .iph
.protocol
= IPPROTO_IPIP
,
364 .iph
.tot_len
= __bpf_constant_htons(MAGIC_BYTES
),
366 .iph_inner
.protocol
= IPPROTO_TCP
,
368 __bpf_constant_htons(MAGIC_BYTES
) -
369 sizeof(struct iphdr
),
375 .flags
= BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP
,
377 .thoff
= ETH_HLEN
+ sizeof(struct iphdr
),
378 .addr_proto
= ETH_P_IP
,
379 .ip_proto
= IPPROTO_IPIP
,
380 .n_proto
= __bpf_constant_htons(ETH_P_IP
),
383 .flags
= BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP
,
/* Create a TAP interface named 'ifname'.
 *
 * IFF_NAPI | IFF_NAPI_FRAGS makes writev() payloads go through the NAPI
 * path, which is the known route into eth_get_headlen() that the skb-less
 * flow-dissector tests rely on (see the comment in test_flow_dissector()).
 *
 * Returns the /dev/net/tun fd on success, -1 on failure.
 */
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd); /* don't leak the tun fd on TUNSETIFF failure */
		return -1;
	}

	return fd;
}
/* Transmit one packet through the tap fd as a single-iovec writev(),
 * which feeds it into the tun driver's NAPI fragment path.
 *
 * Returns the writev() result: bytes written, or -1 with errno set.
 */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};

	return writev(fd, iov, ARRAY_SIZE(iov));
}
/* Bring the interface 'ifname' administratively up (IFF_UP), preserving
 * its other flags via SIOCGIFFLAGS/SIOCSIFFLAGS.
 *
 * Returns 0 on success, -1 on failure. The control socket is closed on
 * every path.
 */
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	/* Any AF_INET datagram socket works as an ioctl control handle. */
	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}
446 void test_flow_dissector(void)
448 int i
, err
, prog_fd
, keys_fd
= -1, tap_fd
;
449 struct bpf_object
*obj
;
452 err
= bpf_flow_load(&obj
, "./bpf_flow.o", "flow_dissector",
453 "jmp_table", "last_dissection", &prog_fd
, &keys_fd
);
457 for (i
= 0; i
< ARRAY_SIZE(tests
); i
++) {
458 struct bpf_flow_keys flow_keys
;
459 struct bpf_prog_test_run_attr tattr
= {
461 .data_in
= &tests
[i
].pkt
,
462 .data_size_in
= sizeof(tests
[i
].pkt
),
463 .data_out
= &flow_keys
,
465 static struct bpf_flow_keys ctx
= {};
467 if (tests
[i
].flags
) {
469 tattr
.ctx_size_in
= sizeof(ctx
);
470 ctx
.flags
= tests
[i
].flags
;
473 err
= bpf_prog_test_run_xattr(&tattr
);
474 CHECK_ATTR(tattr
.data_size_out
!= sizeof(flow_keys
) ||
475 err
|| tattr
.retval
!= 1,
477 "err %d errno %d retval %d duration %d size %u/%lu\n",
478 err
, errno
, tattr
.retval
, tattr
.duration
,
479 tattr
.data_size_out
, sizeof(flow_keys
));
480 CHECK_FLOW_KEYS(tests
[i
].name
, flow_keys
, tests
[i
].keys
);
483 /* Do the same tests but for skb-less flow dissector.
484 * We use a known path in the net/tun driver that calls
485 * eth_get_headlen and we manually export bpf_flow_keys
486 * via BPF map in this case.
489 err
= bpf_prog_attach(prog_fd
, 0, BPF_FLOW_DISSECTOR
, 0);
490 CHECK(err
, "bpf_prog_attach", "err %d errno %d\n", err
, errno
);
492 tap_fd
= create_tap("tap0");
493 CHECK(tap_fd
< 0, "create_tap", "tap_fd %d errno %d\n", tap_fd
, errno
);
495 CHECK(err
, "ifup", "err %d errno %d\n", err
, errno
);
497 for (i
= 0; i
< ARRAY_SIZE(tests
); i
++) {
498 /* Keep in sync with 'flags' from eth_get_headlen. */
499 __u32 eth_get_headlen_flags
=
500 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG
;
501 struct bpf_prog_test_run_attr tattr
= {};
502 struct bpf_flow_keys flow_keys
= {};
503 __u32 key
= (__u32
)(tests
[i
].keys
.sport
) << 16 |
506 /* For skb-less case we can't pass input flags; run
507 * only the tests that have a matching set of flags.
510 if (tests
[i
].flags
!= eth_get_headlen_flags
)
513 err
= tx_tap(tap_fd
, &tests
[i
].pkt
, sizeof(tests
[i
].pkt
));
514 CHECK(err
< 0, "tx_tap", "err %d errno %d\n", err
, errno
);
516 err
= bpf_map_lookup_elem(keys_fd
, &key
, &flow_keys
);
517 CHECK_ATTR(err
, tests
[i
].name
, "bpf_map_lookup_elem %d\n", err
);
519 CHECK_ATTR(err
, tests
[i
].name
, "skb-less err %d\n", err
);
520 CHECK_FLOW_KEYS(tests
[i
].name
, flow_keys
, tests
[i
].keys
);
522 err
= bpf_map_delete_elem(keys_fd
, &key
);
523 CHECK_ATTR(err
, tests
[i
].name
, "bpf_map_delete_elem %d\n", err
);
526 bpf_prog_detach(prog_fd
, BPF_FLOW_DISSECTOR
);
527 bpf_object__close(obj
);