1 /* Tests for BPF devices (LWIP) - by D.C. van Moolenbroek */
2 /* This test needs to be run as root: opening BPF devices is root-only. */
/*
4 * We do not attempt to test the BPF filter code here. Such a test is better
5 * done through standardized tests and with direct use of the filter code.
6 * The current BPF filter implementation has been run through the FreeBSD
7 * BPF filter regression tests (from their tools/regression/bpf/bpf_filter), of
8 * which only the last test (0084 - "Check very long BPF program") failed due
9 * to our lower and strictly enforced BPF_MAXINSNS value. Future modifications
10 * of the BPF filter code should be tested against at least that test set.
*/
15 #include <sys/ioctl.h>
16 #include <sys/socket.h>
17 #include <sys/sysctl.h>
20 #include <net/bpfdesc.h>
22 #include <net/if_types.h>
23 #include <net/if_ether.h>
24 #include <net/if_dl.h>
25 #include <netinet/in.h>
26 #include <netinet/ip.h>
27 #include <netinet/ip6.h>
28 #include <netinet/udp.h>
40 #define LOOPBACK_IFNAME "lo0"
42 #define TEST_PORT_A 12345
43 #define TEST_PORT_B 12346
45 #define SLEEP_TIME 250000 /* (us) - increases may require code changes */
47 #define NONROOT_USER "bin" /* name of any unprivileged user */
/*
 * IPv6 loopback address constant.  NOTE(review): defined locally here --
 * presumably because the target libc does not export in6addr_loopback;
 * confirm against the build environment.
 */
50 const struct in6_addr in6addr_loopback
= IN6ADDR_LOOPBACK_INIT
;
/*
 * Number of SIGUSR1 signals received so far; inspected by the read-timeout
 * and signal-interruption tests below.  NOTE(review): this file is a
 * line-mangled extraction; the statement that increments this counter
 * (presumably in test94_signal) is on a line missing from this view.
 */
53 static unsigned int got_signal
;
/*
 * Signal handler for the tests below: verify that the received signal is
 * SIGUSR1, aborting the test otherwise.  NOTE(review): line-mangled
 * extraction -- the handler's return type, braces and the (presumed)
 * got_signal increment are on lines missing from this view.
 */
59 test94_signal(int sig
)
62 if (sig
!= SIGUSR1
) e(0);
68 * Send UDP packets on the given socket 'fd' so as to fill up a BPF store
69 * buffer of size 'size' exactly. The provided buffer 'buf' may be used for
70 * packet generation and is at least of 'size' bytes. Return the number of
/*
 * Send UDP packets on socket 'fd' so as to fill up a BPF store buffer of
 * size 'size' exactly, starting at sequence number 'seq'.  Each payload
 * begins with the 32-bit sequence number, followed by an 'X' marker,
 * 'Y' filler, and a trailing 'Z' byte.  NOTE(review): line-mangled
 * extraction -- local declarations, braces, the per-iteration 'size'
 * bookkeeping and the return statement are on lines missing from this
 * view; the visible code is kept byte-for-byte.
 */
74 test94_fill_exact(int fd
, uint8_t * buf
, size_t size
, uint32_t seq
)
/* Per-packet overhead: word-aligned BPF header + IP + UDP + seq. */
78 hdrlen
= BPF_WORDALIGN(sizeof(struct bpf_hdr
)) + sizeof(struct ip
) +
79 sizeof(struct udphdr
) + sizeof(seq
);
/* Find the smallest power of two (starting at 16) above the overhead. */
81 for (len
= 16; len
<= hdrlen
; len
<<= 1);
/* Re-align the overhead, excluding the sequence number itself. */
84 hdrlen
= BPF_WORDALIGN(hdrlen
- sizeof(seq
));
/* Generate marker-patterned packets until 'size' bytes are produced. */
86 for (; size
> 0; seq
++) {
87 memset(buf
, 'Y', len
- hdrlen
);
88 if (len
- hdrlen
> sizeof(seq
))
89 buf
[sizeof(seq
)] = 'X';
90 buf
[len
- hdrlen
- 1] = 'Z';
91 memcpy(buf
, &seq
, sizeof(seq
));
93 if (write(fd
, buf
, len
- hdrlen
) != len
- hdrlen
) e(0);
102 * Send UDP packets on the given socket 'fd' so as to fill up at least a BPF
103 * store buffer of size 'size', with at least one more packet being sent. The
104 * provided buffer 'buf' may be used for packet generation and is at least of
/*
 * Send randomly sized UDP packets on socket 'fd' so as to fill up at least
 * a BPF store buffer of size 'size', always sending at least one packet
 * beyond the exact fill.  NOTE(review): line-mangled extraction -- local
 * declarations, braces and the trailing statements are on lines missing
 * from this view; the visible code is kept byte-for-byte.
 */
108 test94_fill_random(int fd
, uint8_t * buf
, size_t size
)
/* Per-packet capture overhead: aligned BPF header + IP + UDP headers. */
114 hdrlen
= BPF_WORDALIGN(BPF_WORDALIGN(sizeof(struct bpf_hdr
)) +
115 sizeof(struct ip
) + sizeof(struct udphdr
));
117 /* Even if we fill the buffer exactly, we send one more packet. */
118 for (left
= (ssize_t
)size
, seq
= 1; left
>= 0; seq
++) {
/* Random payload size, at most a tenth of the buffer size. */
119 len
= hdrlen
+ sizeof(seq
) + lrand48() % (size
/ 10);
121 memset(buf
, 'Y', len
- hdrlen
);
122 if (len
- hdrlen
> sizeof(seq
))
123 buf
[sizeof(seq
)] = 'X';
124 buf
[len
- hdrlen
- 1] = 'Z';
125 memcpy(buf
, &seq
, sizeof(seq
));
127 if (write(fd
, buf
, len
- hdrlen
) != len
- hdrlen
) e(0);
/* Account for the word-aligned space this packet takes in the buffer. */
129 left
-= BPF_WORDALIGN(len
);
134 * Send a UDP packet with a specific size of 'size' bytes and sequence number
135 * 'seq' on socket 'fd', using 'buf' as scratch buffer.
/*
 * Send a UDP packet of a specific size with sequence number 'seq' on
 * socket 'fd', using 'buf' as scratch buffer, with the same 'X'/'Y'/'Z'
 * marker pattern as the fill helpers.  NOTE(review): line-mangled
 * extraction -- braces, a (presumed) adjustment of 'size' and the trailing
 * 'Z' marker assignment are on lines missing from this view.
 */
138 test94_add_specific(int fd
, uint8_t * buf
, size_t size
, uint32_t seq
)
143 memset(buf
, 'Y', size
);
144 if (size
> sizeof(seq
))
145 buf
[sizeof(seq
)] = 'X';
/* The sequence number always occupies the first four payload bytes. */
147 memcpy(buf
, &seq
, sizeof(seq
));
149 if (write(fd
, buf
, size
) != size
) e(0);
153 * Send a randomly sized, relatively small UDP packet on the given socket 'fd',
154 * using sequence number 'seq'. The buffer 'buf' may be used as scratch buffer
155 * which is at most 'size' bytes--the same size as the total BPF buffer.
/*
 * Send a randomly sized, relatively small UDP packet (at most a tenth of
 * the total BPF buffer size 'size') with sequence number 'seq' on socket
 * 'fd'.  Thin wrapper around test94_add_specific().  NOTE(review):
 * line-mangled extraction -- the return type and braces are on missing
 * lines.
 */
158 test94_add_random(int fd
, uint8_t * buf
, size_t size
, uint32_t seq
)
161 test94_add_specific(fd
, buf
, lrand48() % (size
/ 10), seq
);
165 * Check whether the packet in 'buf' of 'caplen' captured bytes out of
166 * 'datalen' data bytes is one we sent. If so, return an offset to the packet
167 * data. If not, return a negative value.
/*
 * Check whether the packet in 'buf', with 'caplen' captured bytes out of
 * 'datalen' data bytes, is one we sent: IPv4, UDP, from TEST_PORT_A to
 * TEST_PORT_B.  On match, return the offset to the packet payload.
 * NOTE(review): line-mangled extraction -- the declarations of 'ip'/'uh',
 * braces and the negative-return statements of the rejection branches are
 * on lines missing from this view.
 */
170 test94_check_pkt(uint8_t * buf
, ssize_t caplen
, ssize_t datalen
)
/* Basic IPv4 header sanity checks. */
175 if (caplen
< sizeof(ip
))
178 memcpy(&ip
, buf
, sizeof(ip
));
180 if (ip
.ip_v
!= IPVERSION
)
182 if (ip
.ip_hl
!= sizeof(ip
) >> 2)
184 if (ip
.ip_p
!= IPPROTO_UDP
)
/* UDP header: must match our test port pair exactly. */
187 if (caplen
- sizeof(ip
) < sizeof(uh
))
190 memcpy(&uh
, buf
+ sizeof(ip
), sizeof(uh
))
192 if (uh
.uh_sport
!= htons(TEST_PORT_A
))
194 if (uh
.uh_dport
!= htons(TEST_PORT_B
))
/* The UDP length field must be consistent with the captured data length. */
197 if (datalen
- sizeof(ip
) != ntohs(uh
.uh_ulen
)) e(0);
199 return sizeof(ip
) + sizeof(uh
);
203 * Check whether the capture in 'buf' of 'len' bytes looks like a valid set of
204 * captured packets. The valid packets start from sequence number 'seq'; the
205 * next expected sequence number is returned. If 'filtered' is set, there
206 * should be no other packets in the capture; otherwise, other packets are
/*
 * Validate a capture in 'buf' of 'len' bytes as a sequence of BPF-headered
 * packets starting at sequence number 'seq'.  If 'filtered' is set, no
 * foreign packets may appear.  When non-NULL, 'caplen'/'datalen' arrays
 * give the expected per-packet capture and data lengths.  NOTE(review):
 * line-mangled extraction -- the enclosing loop construct, several braces
 * and the return of the next expected sequence number are on lines missing
 * from this view; the visible code is kept byte-for-byte.
 */
210 test94_check(uint8_t * buf
, ssize_t len
, uint32_t seq
, int filtered
,
211 uint32_t * caplen
, uint32_t * datalen
)
/*
219 * We rely on the assumption that the last packet in the buffer
220 * is padded to alignment as well; if not, this check fails.
*/
222 if (len
< BPF_WORDALIGN(sizeof(bh
))) e(0);
224 memcpy(&bh
, buf
, sizeof(bh
));
/*
227 * The timestamp fields should be filled in. The tests that
228 * use this function do not set a capture length below the
229 * packet length. The header must be exactly as large as we
230 * expect: no small-size tricks (as NetBSD uses) and no
231 * unexpected extra padding.
*/
233 if (bh
.bh_tstamp
.tv_sec
== 0 && bh
.bh_tstamp
.tv_usec
== 0)
235 if (caplen
!= NULL
) {
236 if (bh
.bh_caplen
!= *caplen
) e(0);
237 if (bh
.bh_datalen
!= *datalen
) e(0);
242 if (bh
.bh_datalen
!= bh
.bh_caplen
) e(0);
243 if (bh
.bh_hdrlen
!= BPF_WORDALIGN(sizeof(bh
))) e(0);
/* The header plus aligned capture must fit in the remaining buffer. */
245 if (bh
.bh_hdrlen
+ BPF_WORDALIGN(bh
.bh_caplen
) > len
) e(0);
/* Foreign packets are skipped (or rejected when 'filtered' is set). */
250 if ((off
= test94_check_pkt(buf
, bh
.bh_caplen
,
251 bh
.bh_datalen
)) < 0) {
254 buf
+= BPF_WORDALIGN(bh
.bh_caplen
);
255 len
-= BPF_WORDALIGN(bh
.bh_caplen
);
/* Our packets carry a monotonically increasing sequence number. */
260 if (bh
.bh_caplen
< off
+ sizeof(seq
)) e(0);
262 memcpy(&nseq
, &buf
[off
], sizeof(nseq
));
264 if (nseq
!= seq
++) e(0);
/* Verify the 'X'/'Y'/'Z' payload marker pattern. */
267 if (off
< bh
.bh_caplen
) {
268 /* If there is just one byte, it is 'Z'. */
269 if (off
< bh
.bh_caplen
&& off
< bh
.bh_datalen
- 1) {
270 if (buf
[off
] != 'X') e(0);
272 for (off
++; off
< bh
.bh_caplen
&&
273 off
< bh
.bh_datalen
- 1; off
++)
274 if (buf
[off
] != 'Y') e(0);
276 if (off
< bh
.bh_caplen
&& off
== bh
.bh_datalen
- 1 &&
277 buf
[off
] != 'Z') e(0);
280 buf
+= BPF_WORDALIGN(bh
.bh_caplen
);
281 len
-= BPF_WORDALIGN(bh
.bh_caplen
);
288 * Filter program to ensure that the given (datalink-headerless) packet is an
289 * IPv4 UDP packet from port 12345 to port 12346. Important: the 'k' value of
290 * the last instruction must be the accepted packet size, and is modified by
291 * some of the tests further down!
293 static struct bpf_insn test94_filter
[] = {
294 { BPF_LD
+BPF_B
+BPF_ABS
, 0, 0, 0 }, /* is this an IPv4 header? */
295 { BPF_ALU
+BPF_RSH
+BPF_K
, 0, 0, 4 },
296 { BPF_JMP
+BPF_JEQ
+BPF_K
, 0, 7, 4 },
297 { BPF_LD
+BPF_B
+BPF_ABS
, 0, 0, 9 }, /* is this a UDP packet? */
298 { BPF_JMP
+BPF_JEQ
+BPF_K
, 0, 5, IPPROTO_UDP
},
299 { BPF_LDX
+BPF_B
+BPF_MSH
, 0, 0, 0 },
300 { BPF_LD
+BPF_H
+BPF_IND
, 0, 0, 0 }, /* source port 12345? */
301 { BPF_JMP
+BPF_JEQ
+BPF_K
, 0, 2, TEST_PORT_A
},
302 { BPF_LD
+BPF_H
+BPF_IND
, 0, 0, 2 }, /* destination port 12346? */
303 { BPF_JMP
+BPF_JEQ
+BPF_K
, 1, 0, TEST_PORT_B
},
304 { BPF_RET
+BPF_K
, 0, 0, 0 }, /* reject the packet */
305 { BPF_RET
+BPF_K
, 0, 0, (uint32_t)-1 }, /* accept the (whole) packet */
309 * Set up a BPF device, a pair of sockets of which traffic will be captured on
310 * the BPF device, a buffer for capturing packets, and optionally a filter.
311 * If the given size is non-zero, use that as buffer size. Return the BPF
312 * device's actual buffer size, which is also the size of 'buf'.
/*
 * Set up a BPF device bound to the loopback interface, a pair of connected
 * UDP sockets whose traffic the BPF device will capture, and a capture
 * buffer of the device's (possibly caller-specified) buffer size.
 * NOTE(review): line-mangled extraction -- the declarations of 'ifr' and
 * 'dlt', the conditional around the filter installation, braces and the
 * return statement are on lines missing from this view.
 */
315 test94_setup(int * fd
, int * fd2
, int * fd3
, uint8_t ** buf
, unsigned int size
,
318 struct sockaddr_in sinA
, sinB
;
320 struct bpf_program bf
;
/* Open the BPF device and configure/query its buffer size. */
323 if ((*fd
= open(_PATH_BPF
, O_RDWR
)) < 0) e(0);
325 if (size
!= 0 && ioctl(*fd
, BIOCSBLEN
, &size
) != 0) e(0);
327 if (ioctl(*fd
, BIOCGBLEN
, &size
) != 0) e(0);
328 if (size
< 1024 || size
> BPF_MAXBUFSIZE
) e(0);
330 if ((*buf
= malloc(size
)) == NULL
) e(0);
/*
334 * Install a filter to improve predictability for the tests.
*/
336 memset(&bf
, 0, sizeof(bf
));
337 bf
.bf_len
= __arraycount(test94_filter
);
338 bf
.bf_insns
= test94_filter
;
339 if (ioctl(*fd
, BIOCSETF
, &bf
) != 0) e(0);
342 /* Bind to the loopback device. */
343 memset(&ifr
, 0, sizeof(ifr
));
344 strlcpy(ifr
.ifr_name
, LOOPBACK_IFNAME
, sizeof(ifr
.ifr_name
));
345 if (ioctl(*fd
, BIOCSETIF
, &ifr
) != 0) e(0);
/*
348 * If the loopback device's data link type is not DLT_RAW, our filter
349 * and size calculations will not work.
*/
351 if (ioctl(*fd
, BIOCGDLT
, &dlt
) != 0) e(0);
352 if (dlt
!= DLT_RAW
) e(0);
354 /* We use UDP traffic for our test packets. */
355 if ((*fd2
= socket(AF_INET
, SOCK_DGRAM
, 0)) < 0) e(0);
357 memset(&sinA
, 0, sizeof(sinA
));
358 sinA
.sin_family
= AF_INET
;
359 sinA
.sin_port
= htons(TEST_PORT_A
);
360 sinA
.sin_addr
.s_addr
= htonl(INADDR_LOOPBACK
);
361 if (bind(*fd2
, (struct sockaddr
*)&sinA
, sizeof(sinA
)) != 0) e(0);
/* fd2: bound to port A, connected to port B; fd3 the reverse. */
363 memcpy(&sinB
, &sinA
, sizeof(sinB
));
364 sinB
.sin_port
= htons(TEST_PORT_B
);
365 if (connect(*fd2
, (struct sockaddr
*)&sinB
, sizeof(sinB
)) != 0) e(0);
367 if ((*fd3
= socket(AF_INET
, SOCK_DGRAM
, 0)) < 0) e(0);
369 if (bind(*fd3
, (struct sockaddr
*)&sinB
, sizeof(sinB
)) != 0) e(0);
371 if (connect(*fd3
, (struct sockaddr
*)&sinA
, sizeof(sinA
)) != 0) e(0);
377 * Clean up resources allocated by test94_setup().
/*
 * Clean up the resources allocated by test94_setup(): close both UDP
 * sockets and the BPF device.  NOTE(review): line-mangled extraction --
 * the return type, braces and the (presumed) free of 'buf' are on lines
 * missing from this view.
 */
380 test94_cleanup(int fd
, int fd2
, int fd3
, uint8_t * buf
)
383 if (close(fd3
) != 0) e(0);
385 if (close(fd2
) != 0) e(0);
389 if (close(fd
) != 0) e(0);
393 * Test reading packets from a BPF device, using regular mode.
/*
 * Test reading packets from a BPF device in regular (non-immediate) mode:
 * blocking and non-blocking reads, select(2), FIONREAD, read timeouts and
 * signal interruption.  NOTE(review): line-mangled extraction -- the
 * function signature (presumably test94a), declarations of pid/tv/fds/
 * seq/len/buf/size, fork()-based child code and various braces are on
 * lines missing from this view; the visible code is kept byte-for-byte.
 */
398 struct bpf_program bf
;
406 int fd
, fd2
, fd3
, status
, bytes
, fl
;
410 size
= test94_setup(&fd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
/*
414 * Test that a filled-up store buffer will be returned to a pending
415 * read call. Perform this first test without a filter, to ensure that
416 * the default behavior is to accept all packets. The side effect is
417 * that we may receive other loopback traffic as part of our capture.
*/
426 test94_fill_random(fd2
, buf
, size
);
437 len
= read(fd
, buf
, size
);
439 if (len
< size
* 3/4) e(0);
440 if (len
> size
) e(0);
441 test94_check(buf
, len
, 1 /*seq*/, 0 /*filtered*/, NULL
/*caplen*/,
444 if (wait(&status
) != pid
) e(0);
445 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
447 /* Only the exact buffer size may be used in read calls. */
448 if (read(fd
, buf
, size
- 1) != -1) e(0);
449 if (errno
!= EINVAL
) e(0);
450 if (read(fd
, buf
, size
+ 1) != -1) e(0);
451 if (errno
!= EINVAL
) e(0);
452 if (read(fd
, buf
, sizeof(struct bpf_hdr
)) != -1) e(0);
453 if (errno
!= EINVAL
) e(0);
/*
456 * Install a filter to improve predictability for the remaining tests.
*/
458 memset(&bf
, 0, sizeof(bf
));
459 bf
.bf_len
= __arraycount(test94_filter
);
460 bf
.bf_insns
= test94_filter
;
461 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
/*
464 * Next we want to test that an already filled-up buffer will be
465 * returned to a read call immediately. We take the opportunity to
466 * test that filling the buffer will also wake up a blocked select
467 * call. In addition, we test ioctl(FIONREAD).
*/
473 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 0) e(0);
474 if (FD_ISSET(fd
, &fds
)) e(0);
476 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
477 if (bytes
!= 0) e(0);
486 test94_fill_random(fd2
, buf
, size
);
499 if (select(fd
+ 1, &fds
, NULL
, NULL
, NULL
) != 1) e(0);
500 if (!FD_ISSET(fd
, &fds
)) e(0);
502 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
504 if (select(fd
+ 1, &fds
, NULL
, NULL
, NULL
) != 1) e(0);
505 if (!FD_ISSET(fd
, &fds
)) e(0);
507 len
= read(fd
, buf
, size
);
509 if (len
< size
* 3/4) e(0);
510 if (len
> size
) e(0);
511 seq
= test94_check(buf
, len
, 1 /*seq*/, 1 /*filtered*/,
512 NULL
/*caplen*/, NULL
/*datalen*/);
514 if (len
!= bytes
) e(0);
516 if (wait(&status
) != pid
) e(0);
517 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
519 /* There is one more packet in the store buffer at this point. */
524 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 0) e(0);
525 if (FD_ISSET(fd
, &fds
)) e(0);
527 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
528 if (bytes
!= 0) e(0);
/*
531 * Next, we test whether read timeouts work, first checking that a
532 * timed-out read call returns any packets currently in the buffer.
533 * We use sleep and a signal as a crude way to test that the call was
534 * actually blocked until the timeout occurred.
*/
543 signal(SIGUSR1
, test94_signal
);
547 test94_add_random(fd2
, buf
, size
, seq
+ 1);
551 if (got_signal
!= 0) e(0);
553 if (got_signal
!= 1) e(0);
565 tv
.tv_usec
= SLEEP_TIME
* 3;
566 if (ioctl(fd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
568 len
= read(fd
, buf
, size
);
570 if (len
>= size
* 3/4) e(0); /* two packets < 3/4 of the size */
571 if (test94_check(buf
, len
, seq
, 1 /*filtered*/, NULL
/*caplen*/,
572 NULL
/*datalen*/) != seq
+ 2) e(0);
574 if (kill(pid
, SIGUSR1
) != 0) e(0);
576 if (wait(&status
) != pid
) e(0);
577 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
/*
580 * Next, see if a timed-out read will all buffers empty yields EAGAIN.
*/
583 tv
.tv_usec
= SLEEP_TIME
;
584 if (ioctl(fd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
586 if (read(fd
, buf
, size
) != -1) e(0);
587 if (errno
!= EAGAIN
) e(0);
/*
590 * Verify that resetting the timeout to zero makes the call block
591 * forever (for short test values of "forever" anyway), because
592 * otherwise this may create a false illusion of correctness in the
593 * next test, for non-blocking calls. As a side effect, this tests
594 * read call signal interruption, and ensures no partial results are
595 * returned in that case.
*/
599 if (ioctl(fd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
606 signal(SIGUSR1
, test94_signal
);
608 if (read(fd
, buf
, size
) != -1) e(0);
609 if (errno
!= EINTR
) e(0);
611 if (got_signal
!= 1) e(0);
622 usleep(SLEEP_TIME
* 2);
624 if (kill(pid
, SIGUSR1
) != 0) e(0);
626 if (wait(&status
) != pid
) e(0);
627 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
/*
630 * Repeat the same test with a non-full, non-empty buffer, to ensure
631 * that interrupted reads do not return partial results.
*/
638 signal(SIGUSR1
, test94_signal
);
640 if (read(fd
, buf
, size
) != -1) e(0);
641 if (errno
!= EINTR
) e(0);
643 if (got_signal
!= 1) e(0);
656 test94_add_random(fd2
, buf
, size
, 2);
660 if (kill(pid
, SIGUSR1
) != 0) e(0);
662 if (wait(&status
) != pid
) e(0);
663 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
/*
666 * Test non-blocking reads with empty, full, and non-empty buffers.
667 * Against common sense, the last case should return whatever is in
668 * the buffer rather than EAGAIN, like immediate-mode reads would.
*/
670 if ((fl
= fcntl(fd
, F_GETFL
)) == -1) e(0);
671 if (fcntl(fd
, F_SETFL
, fl
| O_NONBLOCK
) != 0) e(0);
673 len
= read(fd
, buf
, size
);
675 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
676 seq
= test94_check(buf
, len
, 2 /*seq*/, 1 /*filtered*/,
677 NULL
/*caplen*/, NULL
/*datalen*/);
679 if (read(fd
, buf
, size
) != -1) e(0);
680 if (errno
!= EAGAIN
) e(0);
682 test94_fill_random(fd2
, buf
, size
);
684 len
= read(fd
, buf
, size
);
685 if (len
< size
* 3/4) e(0);
686 if (len
> size
) e(0);
687 seq
= test94_check(buf
, len
, 1 /*seq*/, 1 /*filtered*/,
688 NULL
/*caplen*/, NULL
/*datalen*/);
690 len
= read(fd
, buf
, size
);
693 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
694 if (test94_check(buf
, len
, seq
, 1 /*filtered*/, NULL
/*caplen*/,
695 NULL
/*datalen*/) != seq
+ 1) e(0);
697 if (fcntl(fd
, F_SETFL
, fl
) != 0) e(0);
/*
700 * Test two remaining aspects of select(2): single-packet arrivals do
701 * not cause a wake-up, and the read timer has no effect. The latter
702 * is a deliberate implementation choice where we diverge from NetBSD,
703 * because it requires keeping state in a way that violates the
704 * principle of system call independence.
*/
707 tv
.tv_usec
= SLEEP_TIME
* 2;
708 if (ioctl(fd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
717 test94_add_random(fd2
, buf
, size
, 1);
732 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 0) e(0);
734 if (wait(&status
) != pid
) e(0);
735 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
737 test94_cleanup(fd
, fd2
, fd3
, buf
);
741 * Test reading packets from a BPF device, using immediate mode.
/*
 * Test reading packets from a BPF device in immediate mode (BIOCIMMEDIATE):
 * immediate returns of partial store buffers, select/FIONREAD coherence,
 * wake-up of suspended calls, non-blocking reads, and timeouts.
 * NOTE(review): line-mangled extraction -- the function signature
 * (presumably test94b), declarations of buf/size/seq/len/val/tv/fds/pid,
 * fork()-based portions and various braces are on lines missing from this
 * view; the visible code is kept byte-for-byte.
 */
754 int fd
, fd2
, fd3
, bytes
, status
, fl
;
758 size
= test94_setup(&fd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
762 if (ioctl(fd
, BIOCIMMEDIATE
, &val
) != 0) e(0);
768 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 0) e(0);
770 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
771 if (bytes
!= 0) e(0);
/*
774 * Ensure that if the hold buffer is full, an immediate-mode read
775 * returns the content of the hold buffer, even if the store buffer is
*/
778 test94_fill_random(fd2
, buf
, size
);
782 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 1) e(0);
783 if (!FD_ISSET(fd
, &fds
)) e(0);
785 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
787 len
= read(fd
, buf
, size
);
788 if (len
< size
* 3/4) e(0);
789 if (len
> size
) e(0);
790 seq
= test94_check(buf
, len
, 1 /*seq*/, 1 /*filtered*/,
791 NULL
/*caplen*/, NULL
/*datalen*/);
793 if (len
!= bytes
) e(0);
/*
796 * There is one packet left in the buffer. In immediate mode, this
797 * packet should be returned immediately.
*/
801 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 1) e(0);
802 if (!FD_ISSET(fd
, &fds
)) e(0);
804 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
806 len
= read(fd
, buf
, size
);
808 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
809 if (test94_check(buf
, len
, seq
, 1 /*filtered*/, NULL
/*caplen*/,
810 NULL
/*datalen*/) != seq
+ 1) e(0);
812 if (len
!= bytes
) e(0);
814 /* The buffer is now empty again. */
817 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 0) e(0);
819 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
820 if (bytes
!= 0) e(0);
/*
823 * Immediate-mode reads may return multiple packets from the store
*/
826 test94_add_random(fd2
, buf
, size
, seq
+ 1);
827 test94_add_random(fd2
, buf
, size
, seq
+ 2);
831 if (select(fd
+ 1, &fds
, NULL
, NULL
, &tv
) != 1) e(0);
832 if (!FD_ISSET(fd
, &fds
)) e(0);
834 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
836 len
= read(fd
, buf
, size
);
838 if (len
>= size
* 3/4) e(0); /* two packets < 3/4 of the size */
839 if (test94_check(buf
, len
, seq
+ 1, 1 /*filtered*/, NULL
/*caplen*/,
840 NULL
/*datalen*/) != seq
+ 3) e(0);
842 if (len
!= bytes
) e(0);
/*
845 * Now test waking up suspended calls, read(2) first.
*/
854 test94_add_random(fd2
, buf
, size
, seq
+ 3);
865 len
= read(fd
, buf
, size
);
867 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
868 if (test94_check(buf
, len
, seq
+ 3, 1 /*filtered*/, NULL
/*caplen*/,
869 NULL
/*datalen*/) != seq
+ 4) e(0);
871 if (wait(&status
) != pid
) e(0);
872 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
884 test94_add_random(fd2
, buf
, size
, seq
+ 4);
897 if (select(fd
+ 1, &fds
, NULL
, NULL
, NULL
) != 1) e(0);
898 if (!FD_ISSET(fd
, &fds
)) e(0);
900 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
902 if (select(fd
+ 1, &fds
, NULL
, NULL
, NULL
) != 1) e(0);
903 if (!FD_ISSET(fd
, &fds
)) e(0);
905 len
= read(fd
, buf
, size
);
907 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
908 if (test94_check(buf
, len
, seq
+ 4, 1 /*filtered*/, NULL
/*caplen*/,
909 NULL
/*datalen*/) != seq
+ 5) e(0);
911 if (len
!= bytes
) e(0);
913 if (wait(&status
) != pid
) e(0);
914 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
/*
917 * Non-blocking reads should behave just as with regular mode.
*/
919 if ((fl
= fcntl(fd
, F_GETFL
)) == -1) e(0);
920 if (fcntl(fd
, F_SETFL
, fl
| O_NONBLOCK
) != 0) e(0);
922 if (read(fd
, buf
, size
) != -1) e(0);
923 if (errno
!= EAGAIN
) e(0);
925 test94_fill_random(fd2
, buf
, size
);
927 len
= read(fd
, buf
, size
);
928 if (len
< size
* 3/4) e(0);
929 if (len
> size
) e(0);
930 seq
= test94_check(buf
, len
, 1 /*seq*/, 1 /*filtered*/,
931 NULL
/*caplen*/, NULL
/*datalen*/);
933 len
= read(fd
, buf
, size
);
935 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
936 if (test94_check(buf
, len
, seq
, 1 /*filtered*/, NULL
/*caplen*/,
937 NULL
/*datalen*/) != seq
+ 1) e(0);
939 if (fcntl(fd
, F_SETFL
, fl
) != 0) e(0);
/*
942 * Timeouts should work with immediate mode.
*/
945 tv
.tv_usec
= SLEEP_TIME
;
946 if (ioctl(fd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
948 if (read(fd
, buf
, size
) != -1) e(0);
949 if (errno
!= EAGAIN
) e(0);
951 test94_cleanup(fd
, fd2
, fd3
, buf
);
955 * Test reading packets from a BPF device, with an exactly filled buffer. The
956 * idea is that normally the store buffer is considered "full" if the next
957 * packet does not fit in it, but if no more bytes are left in it, it can be
958 * rotated immediately. This is a practically useless edge case, but we
959 * support it, so we might as well test it. Also, some of the code for this
960 * case is shared with other rare cases that we cannot test here (interfaces
961 * disappearing, to be specific), and exactly filling up the buffers does test
962 * some other bounds checks so all that might make this worth it anyway. While
963 * we are exercising full control over our buffers, also check statistics.
/*
 * Test reading from a BPF device with exactly filled buffers: immediate
 * rotation of an exactly-full store buffer, buffer swapping, and drop
 * accounting, verifying BIOCGSTATS (bs_recv/bs_capt/bs_drop) throughout.
 * NOTE(review): line-mangled extraction -- the function signature
 * (presumably test94c), declarations of buf/size/seq/count/bs/fds/pid,
 * fork()-based portions and various braces are on lines missing from this
 * view; the visible code is kept byte-for-byte.
 */
974 int fd
, fd2
, fd3
, bytes
, status
, fl
;
978 size
= test94_setup(&fd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
/* Statistics must start out zeroed. */
981 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
982 if (bs
.bs_capt
!= 0) e(0);
983 if (bs
.bs_drop
!= 0) e(0);
/*
986 * Test read, select, and ioctl(FIONREAD) on an exactly filled buffer.
*/
988 count
= test94_fill_exact(fd2
, buf
, size
, 0);
990 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
991 if (bs
.bs_capt
!= count
) e(0);
992 if (bs
.bs_recv
< bs
.bs_capt
) e(0); /* may be more */
993 if (bs
.bs_drop
!= 0) e(0);
995 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
996 if (bytes
!= size
) e(0);
1000 if (select(fd
+ 1, &fds
, NULL
, NULL
, NULL
) != 1) e(0);
1001 if (!FD_ISSET(fd
, &fds
)) e(0);
1003 if (read(fd
, buf
, size
) != size
) e(0);
1004 test94_check(buf
, size
, 0 /*seq*/, 1 /*filtered*/, NULL
/*caplen*/,
/*
1008 * If the store buffer is full, the buffers should be swapped after
1009 * emptying the hold buffer.
*/
1011 seq
= test94_fill_exact(fd2
, buf
, size
, 1);
1012 test94_fill_exact(fd2
, buf
, size
, seq
);
1014 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1015 if (bs
.bs_capt
!= count
* 3) e(0);
1016 if (bs
.bs_recv
< bs
.bs_capt
) e(0); /* may be more */
1017 if (bs
.bs_drop
!= 0) e(0);
1019 test94_add_random(fd2
, buf
, size
, 0); /* this one will get dropped */
1021 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1022 if (bs
.bs_capt
!= count
* 3 + 1) e(0);
1023 if (bs
.bs_recv
< bs
.bs_capt
) e(0); /* may be more */
1024 if (bs
.bs_drop
!= 1) e(0);
1026 test94_add_random(fd2
, buf
, size
, 0); /* this one will get dropped */
1028 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1029 if (bs
.bs_capt
!= count
* 3 + 2) e(0);
1030 if (bs
.bs_recv
< bs
.bs_capt
) e(0); /* may be more */
1031 if (bs
.bs_drop
!= 2) e(0);
1033 if (ioctl(fd
, FIONREAD
, &bytes
) != 0) e(0);
1034 if (bytes
!= size
) e(0);
1036 if (read(fd
, buf
, size
) != size
) e(0);
1037 if (test94_check(buf
, size
, 1 /*seq*/, 1 /*filtered*/, NULL
/*caplen*/,
1038 NULL
/*datalen*/) != seq
) e(0);
1040 if (read(fd
, buf
, size
) != size
) e(0);
1041 if (test94_check(buf
, size
, seq
, 1 /*filtered*/, NULL
/*caplen*/,
1042 NULL
/*datalen*/) != count
* 2 + 1) e(0);
/*
1045 * See if an exactly filled buffer resumes reads...
*/
1054 test94_fill_exact(fd2
, buf
, size
, 1);
1065 if (read(fd
, buf
, size
) != size
) e(0);
1066 test94_check(buf
, size
, 1 /*seq*/, 1 /*filtered*/, NULL
/*caplen*/,
1069 if (wait(&status
) != pid
) e(0);
1070 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
1082 test94_fill_exact(fd2
, buf
, size
, seq
);
1095 if (select(fd
+ 1, &fds
, NULL
, NULL
, NULL
) != 1) e(0);
1096 if (!FD_ISSET(fd
, &fds
)) e(0);
1098 if ((fl
= fcntl(fd
, F_GETFL
)) == -1) e(0);
1099 if (fcntl(fd
, F_SETFL
, fl
| O_NONBLOCK
) != 0) e(0);
1101 if (read(fd
, buf
, size
) != size
) e(0);
1102 test94_check(buf
, size
, seq
, 1 /*filtered*/, NULL
/*caplen*/,
1105 if (read(fd
, buf
, size
) != -1) e(0);
1106 if (errno
!= EAGAIN
) e(0);
1108 if (wait(&status
) != pid
) e(0);
1109 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
/* Final statistics check covering everything sent above. */
1111 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1112 if (bs
.bs_capt
!= count
* 5 + 2) e(0);
1113 if (bs
.bs_recv
< bs
.bs_capt
) e(0); /* may be more */
1114 if (bs
.bs_drop
!= 2) e(0);
1116 test94_cleanup(fd
, fd2
, fd3
, buf
);
1120 * Test receipt of large packets on BPF devices. Large packets should be
1121 * truncated to the size of the buffer, but unless the filter specifies a
1122 * smaller capture size, no more than that.
/*
 * Test receipt of large packets on BPF devices: packets larger than the
 * capture buffer must be truncated to the buffer size (not to the space
 * left in the buffer), and must not be split across buffers.
 * NOTE(review): line-mangled extraction -- the function signature
 * (presumably test94d), declarations of size/len/bh, the initialization
 * of 'datalen' and the free of 'buf2' are on lines missing from this
 * view; the visible code is kept byte-for-byte.
 */
1128 uint8_t *buf
, *buf2
;
1131 int fd
, fd2
, fd3
, datalen
;
/*
1136 * Specify a size smaller than the largest packet we can send on the
1137 * loopback device. The size we specify here is currently the default
1138 * size already anyway, but that might change in the future.
*/
1140 size
= test94_setup(&fd
, &fd2
, &fd3
, &buf
, 32768 /*size*/,
1142 if (size
!= 32768) e(0);
1145 if (setsockopt(fd2
, SOL_SOCKET
, SO_SNDBUF
, &datalen
,
1146 sizeof(datalen
)) != 0) e(0);
1148 if ((buf2
= malloc(datalen
)) == NULL
) e(0);
/* Place a 'Z' marker exactly at the truncation boundary. */
1150 memset(buf2
, 'Y', datalen
);
1152 buf2
[size
- sizeof(struct udphdr
) - sizeof(struct ip
) -
1153 BPF_WORDALIGN(sizeof(bh
)) - 1] = 'Z';
1155 if (write(fd2
, buf2
, datalen
) != datalen
) e(0);
1157 if (read(fd
, buf
, size
) != size
) e(0);
/* The capture must be truncated to the buffer, data length preserved. */
1159 memcpy(&bh
, buf
, sizeof(bh
));
1161 if (bh
.bh_hdrlen
!= BPF_WORDALIGN(sizeof(bh
))) e(0);
1162 if (bh
.bh_caplen
!= size
- BPF_WORDALIGN(sizeof(bh
))) e(0);
1163 if (bh
.bh_datalen
!=
1164 sizeof(struct ip
) + sizeof(struct udphdr
) + datalen
) e(0);
1166 if (buf
[BPF_WORDALIGN(sizeof(bh
)) + sizeof(struct ip
) +
1167 sizeof(struct udphdr
)] != 'X') e(0);
1168 if (buf
[size
- 2] != 'Y') e(0);
1169 if (buf
[size
- 1] != 'Z') e(0);
/*
1172 * Add a smaller packet in between, to ensure that 1) the large packet
1173 * is not split across buffers, and 2) the packet is truncated to the
1174 * size of the buffer, not the available part of the buffer. Note how
1175 * forced rotation and our exact-fill policy preclude us from having to
1176 * use immediate mode for any of this.
*/
1178 test94_add_random(fd2
, buf
, size
, 1 /*seq*/);
1180 if (write(fd2
, buf2
, datalen
) != datalen
) e(0);
1182 len
= read(fd
, buf
, size
);
1184 if (len
>= size
* 3/4) e(0); /* one packet < 3/4 of the size */
1185 if (test94_check(buf
, len
, 1 /*seq*/, 1 /*filtered*/, NULL
/*caplen*/,
1186 NULL
/*datalen*/) != 2) e(0);
1188 if (read(fd
, buf
, size
) != size
) e(0);
1190 memcpy(&bh
, buf
, sizeof(bh
));
1192 if (bh
.bh_hdrlen
!= BPF_WORDALIGN(sizeof(bh
))) e(0);
1193 if (bh
.bh_caplen
!= size
- BPF_WORDALIGN(sizeof(bh
))) e(0);
1194 if (bh
.bh_datalen
!=
1195 sizeof(struct ip
) + sizeof(struct udphdr
) + datalen
) e(0);
1197 if (buf
[BPF_WORDALIGN(sizeof(bh
)) + sizeof(struct ip
) +
1198 sizeof(struct udphdr
)] != 'X') e(0);
1199 if (buf
[size
- 2] != 'Y') e(0);
1200 if (buf
[size
- 1] != 'Z') e(0);
1204 test94_cleanup(fd
, fd2
, fd3
, buf
);
1208 * Test whether our filter is active through two-way communication and a
1209 * subsequent check on the BPF statistics. We do not actually look through the
1210 * captured packets, because who knows what else is active on the loopback
1211 * device (e.g., X11) and the extra code specifically to extract our packets in
1212 * the other direction is simply not worth it.
/*
 * Verify whether the filter state given by 'filtered' is in effect, using
 * two-way UDP traffic between fd2 and fd3 and the resulting BIOCGSTATS
 * counters on the BPF device 'fd'; capture state is flushed (BIOCFLUSH)
 * afterwards.  NOTE(review): line-mangled extraction -- the declarations
 * of 'c'/'bs', the if (filtered) branch structure and closing braces are
 * on lines missing from this view.
 */
1215 test94_comm(int fd
, int fd2
, int fd3
, int filtered
)
/* Direction A -> B: always captured (matches the test filter). */
1220 if (write(fd2
, "A", 1) != 1) e(0);
1222 if (read(fd3
, &c
, 1) != 1) e(0);
1225 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1226 if (bs
.bs_recv
== 0) e(0);
1227 if (bs
.bs_capt
== 0) e(0);
1229 if (ioctl(fd
, BIOCFLUSH
) != 0) e(0);
/* Direction B -> A: captured only when no filter is installed. */
1231 if (write(fd3
, "B", 1) != 1) e(0);
1233 if (read(fd2
, &c
, 1) != 1) e(0);
1236 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1237 if (bs
.bs_recv
== 0) e(0);
1240 if (bs
.bs_capt
!= 0) e(0);
1241 if (bs
.bs_drop
!= 0) e(0);
1243 if (bs
.bs_capt
== 0) e(0);
1245 if (ioctl(fd
, BIOCFLUSH
) != 0) e(0);
1249 * Test filter installation and mechanics.
/*
 * Test filter installation and mechanics: rejection of oversized and
 * invalid filters, clearing filters with a zero-length program, aligned
 * and unaligned capture sizes, one-byte captures, and zero-size captures.
 * NOTE(review): line-mangled extraction -- the function signature
 * (presumably test94e), some caplen[]/datalen[] assignments, loop bodies
 * and braces are on lines missing from this view; the visible code is
 * kept byte-for-byte.
 */
1254 struct bpf_program bf
;
1258 size_t size
, len
, plen
, alen
, off
;
1259 uint32_t seq
, caplen
[4], datalen
[4];
1260 int i
, fd
, fd2
, fd3
, val
;
/*
1265 * We have already tested installing a filter both before and after
1266 * attaching to an interface by now, so we do not repeat that here.
*/
1268 size
= test94_setup(&fd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
1272 if (ioctl(fd
, BIOCIMMEDIATE
, &val
) != 0) e(0);
/*
1275 * A filter that is too large is rejected. Unfortunately, due to
1276 * necessary IOCTL rewriting, this tests libc, not the service.
*/
1278 memset(&bf
, 0, sizeof(bf
));
1279 bf
.bf_len
= BPF_MAXINSNS
+ 1;
1281 if (ioctl(fd
, BIOCSETF
, &bf
) != -1) e(0);
1282 if (errno
!= EINVAL
) e(0);
/*
1285 * An invalid filter is rejected. In this test case, the truncated
1286 * filter has a jump target beyond the end of the filter program.
*/
1288 memset(&bf
, 0, sizeof(bf
));
1289 bf
.bf_len
= __arraycount(test94_filter
) - 1;
1290 bf
.bf_insns
= test94_filter
;
1291 if (ioctl(fd
, BIOCSETF
, &bf
) != -1) e(0);
1292 if (errno
!= EINVAL
) e(0);
1294 test94_comm(fd
, fd2
, fd3
, 0 /*filtered*/);
1297 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1299 test94_comm(fd
, fd2
, fd3
, 1 /*filtered*/);
/*
1302 * Installing a zero-length filter clears the current filter, if any.
*/
1304 memset(&bf
, 0, sizeof(bf
));
1305 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1307 test94_comm(fd
, fd2
, fd3
, 0 /*filtered*/);
1309 /* Test this twice to trip over unconditional filter deallocation. */
1310 memset(&bf
, 0, sizeof(bf
));
1311 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1313 test94_comm(fd
, fd2
, fd3
, 0 /*filtered*/);
/*
1316 * Test both aligned and unaligned capture sizes. For each, test
1317 * sizes larger than, equal to, and smaller than the capture size.
1318 * In both cases, aggregate the packets into a single buffer and only
1319 * then go through them, to see whether alignment was done correctly.
1320 * We cannot do everything in one go as BIOCSETF implies a BIOCFLUSH.
*/
1322 plen
= sizeof(struct ip
) + sizeof(struct udphdr
) + sizeof(seq
);
1323 if (BPF_WORDALIGN(plen
) != plen
) e(0);
1324 alen
= BPF_WORDALIGN(plen
+ 1);
1325 if (alen
- 2 <= plen
+ 1) e(0);
1327 /* First the aligned cases. */
1328 test94_filter
[__arraycount(test94_filter
) - 1].k
= alen
;
1330 memset(&bf
, 0, sizeof(bf
));
1331 bf
.bf_len
= __arraycount(test94_filter
);
1332 bf
.bf_insns
= test94_filter
;
1333 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1335 test94_comm(fd
, fd2
, fd3
, 1 /*filtered*/);
1337 test94_add_specific(fd2
, buf
, alen
+ 1 - plen
, 1);
1339 datalen
[0] = alen
+ 1;
1341 test94_add_specific(fd2
, buf
, alen
- plen
, 2);
1345 test94_add_specific(fd2
, buf
, alen
+ 3 - plen
, 3);
1347 datalen
[2] = alen
+ 3;
1349 test94_add_specific(fd2
, buf
, alen
- 1 - plen
, 4);
1350 caplen
[3] = alen
- 1;
1351 datalen
[3] = alen
- 1;
1353 memset(buf
, 0, size
);
1355 len
= read(fd
, buf
, size
);
1357 if (test94_check(buf
, len
, 1 /*seq*/, 1 /*filtered*/, caplen
,
1358 datalen
) != 5) e(0);
1360 /* Then the unaligned cases. */
1361 test94_filter
[__arraycount(test94_filter
) - 1].k
= alen
+ 1;
1362 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1364 test94_add_specific(fd2
, buf
, alen
+ 2 - plen
, 5);
1365 caplen
[0] = alen
+ 1;
1366 datalen
[0] = alen
+ 2;
1368 test94_add_specific(fd2
, buf
, alen
+ 1 - plen
, 6);
1369 caplen
[1] = alen
+ 1;
1370 datalen
[1] = alen
+ 1;
1372 test94_add_specific(fd2
, buf
, alen
+ 9 - plen
, 7);
1373 caplen
[2] = alen
+ 1;
1374 datalen
[2] = alen
+ 9;
1376 test94_add_specific(fd2
, buf
, alen
- plen
, 8);
1380 memset(buf
, 0, size
);
1382 len
= read(fd
, buf
, size
);
1384 if (test94_check(buf
, len
, 5 /*seq*/, 1 /*filtered*/, caplen
,
1385 datalen
) != 9) e(0);
/*
1388 * Check that capturing only one byte from packets is possible. Not
1389 * that that would be particularly useful.
*/
1391 test94_filter
[__arraycount(test94_filter
) - 1].k
= 1;
1392 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1394 test94_add_random(fd2
, buf
, size
, 9);
1395 test94_add_random(fd2
, buf
, size
, 10);
1396 test94_add_random(fd2
, buf
, size
, 11);
1398 memset(buf
, 0, size
);
1400 len
= read(fd
, buf
, size
);
/* Walk the three one-byte captures and validate each header. */
1404 for (i
= 0; i
< 3; i
++) {
1405 if (len
- off
< sizeof(bh
)) e(0);
1406 memcpy(&bh
, &buf
[off
], sizeof(bh
));
1408 if (bh
.bh_tstamp
.tv_sec
== 0 && bh
.bh_tstamp
.tv_usec
== 0)
1410 if (bh
.bh_caplen
!= 1) e(0);
1411 if (bh
.bh_datalen
< plen
) e(0);
1412 if (bh
.bh_hdrlen
!= BPF_WORDALIGN(sizeof(bh
))) e(0);
1414 off
+= bh
.bh_hdrlen
;
/* 0x45 is the first byte of a standard IPv4 header (version+IHL). */
1416 if (buf
[off
] != 0x45) e(0);
1418 off
+= BPF_WORDALIGN(bh
.bh_caplen
);
1420 if (off
!= len
) e(0);
/*
1423 * Finally, a zero capture size should result in rejected packets only.
*/
1425 test94_filter
[__arraycount(test94_filter
) - 1].k
= 0;
1426 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
1428 test94_add_random(fd2
, buf
, size
, 12);
1429 test94_add_random(fd2
, buf
, size
, 13);
1430 test94_add_random(fd2
, buf
, size
, 14);
1432 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1433 if (bs
.bs_recv
< 3) e(0);
1434 if (bs
.bs_capt
!= 0) e(0);
1435 if (bs
.bs_drop
!= 0) e(0);
1437 /* Restore the capture limit of the filter to its original state. */
1438 test94_filter
[__arraycount(test94_filter
) - 1].k
= (uint32_t)-1;
1440 test94_cleanup(fd
, fd2
, fd3
, buf
);
/*
 * Compute an IP checksum: the standard Internet ones'-complement sum over
 * the 'len' bytes at 'buf' (RFC 1071).  The result is returned in host byte
 * order; callers convert with htons() before storing it in a header.  An odd
 * trailing byte is treated as the high byte of a final 16-bit word.
 */
static uint16_t
test94_cksum(uint8_t * buf, size_t len)
{
	uint32_t sum, word;

	/* This is a really dumb implementation but *shrug*. */
	for (sum = 0; len > 0; sum += word) {
		if (len > 1) {
			/* Words are summed in big-endian (network) order. */
			word = buf[0] << 8 | buf[1];
			buf += 2;
			len -= 2;
		} else {
			/* Odd trailing byte: pad with a zero low byte. */
			word = buf[0] << 8;
			buf++;
			len--;
		}
	}

	/* Fold carries back into the low 16 bits until none remain. */
	while (sum > UINT16_MAX)
		sum = (sum & UINT16_MAX) + (sum >> 16);

	return ~(uint16_t)sum;
}
1470 * Set up UDP headers for a packet. The packet uses IPv4 unless 'v6' is set,
1471 * in which case IPv6 is used. The given buffer must be large enough to
1472 * contain the headers and the (to be appended) data. The function returns the
1473 * offset into the buffer to the data portion of the packet.
1476 test94_make_pkt(uint8_t * buf
, size_t len
, int v6
)
1484 memset(&ip
, 0, sizeof(ip
));
1485 ip
.ip_v
= IPVERSION
;
1486 ip
.ip_hl
= sizeof(ip
) >> 2;
1487 ip
.ip_len
= htons(sizeof(ip
) + sizeof(uh
) + len
);
1489 ip
.ip_p
= IPPROTO_UDP
;
1491 ip
.ip_src
.s_addr
= htonl(INADDR_LOOPBACK
);
1492 ip
.ip_dst
.s_addr
= htonl(INADDR_LOOPBACK
);
1494 memcpy(buf
, &ip
, sizeof(ip
));
1495 ip
.ip_sum
= htons(test94_cksum(buf
, sizeof(ip
)));
1496 memcpy(buf
, &ip
, sizeof(ip
));
1497 if (test94_cksum(buf
, sizeof(ip
)) != 0) e(0);
1501 memset(&ip6
, 0, sizeof(ip6
));
1502 ip6
.ip6_vfc
= IPV6_VERSION
;
1503 ip6
.ip6_plen
= htons(sizeof(uh
) + len
);
1504 ip6
.ip6_nxt
= IPPROTO_UDP
;
1506 memcpy(&ip6
.ip6_src
, &in6addr_loopback
, sizeof(ip6
.ip6_src
));
1507 memcpy(&ip6
.ip6_dst
, &in6addr_loopback
, sizeof(ip6
.ip6_dst
));
1509 memcpy(buf
, &ip6
, sizeof(ip6
));
1514 memset(&uh
, 0, sizeof(uh
));
1515 uh
.uh_sport
= htons(TEST_PORT_A
);
1516 uh
.uh_dport
= htons(TEST_PORT_B
);
1517 uh
.uh_ulen
= htons(sizeof(uh
) + len
);
1518 uh
.uh_sum
= 0; /* lazy but we also don't have the data yet */
1520 memcpy(buf
+ off
, &uh
, sizeof(uh
));
1522 return off
+ sizeof(uh
);
/*
 * Test sending packets by writing to a BPF device.
 */
1536 unsigned int i
, uval
, mtu
;
1541 (void)test94_setup(&fd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
1545 * Select queries should always indicate that the device is writable.
1549 if (select(fd
+ 1, NULL
, &fds
, NULL
, NULL
) != 1) e(0);
1550 if (!FD_ISSET(fd
, &fds
)) e(0);
1553 * Test packet size limits. For loopback devices, the maximum data
1554 * link layer level maximum transmission unit should be 65535-4 =
1555 * 65531 bytes. Obtain the actual value anyway; it might have changed.
1557 memset(&ifr
, 0, sizeof(ifr
));
1558 strlcpy(ifr
.ifr_name
, LOOPBACK_IFNAME
, sizeof(ifr
.ifr_name
));
1560 if (ioctl(fd2
, SIOCGIFMTU
, &ifr
) != 0) e(0);
1563 if ((buf
= realloc(buf
, UINT16_MAX
+ 1)) == NULL
) e(0);
1565 memset(buf
, 0, UINT16_MAX
+ 1);
1567 for (i
= UINT16_MAX
+ 1; i
> mtu
; i
--) {
1568 if (write(fd
, buf
, i
) != -1) e(0);
1569 if (errno
!= EMSGSIZE
) e(0);
1572 /* This packet will be discarded as completely crap. That's fine. */
1573 if (write(fd
, buf
, mtu
) != mtu
) e(0);
1576 * Zero-sized writes are accepted but do not do anything.
1578 if (write(fd
, buf
, 0) != 0) e(0);
1581 * Send an actual packet, and see if it arrives.
1583 off
= test94_make_pkt(buf
, 6, 0 /*v6*/);
1584 memcpy(buf
+ off
, "Hello!", 6);
1586 if (write(fd
, buf
, off
+ 6) != off
+ 6) e(0);
1588 memset(buf
, 0, mtu
);
1589 if (read(fd3
, buf
, mtu
) != 6) e(0);
1590 if (memcmp(buf
, "Hello!", 6) != 0) e(0);
1593 * Enable feedback mode to test that the packet now arrives twice.
1594 * Send a somewhat larger packet to test that data copy-in handles
1595 * offsets correctly.
1598 if (ioctl(fd
, BIOCSFEEDBACK
, &uval
) != 0) e(0);
1600 off
= test94_make_pkt(buf
, 12345, 0 /*v6*/);
1601 for (i
= 0; i
< 12345; i
++)
1602 buf
[off
+ i
] = 1 + (i
% 251); /* the largest prime < 255 */
1604 if (write(fd
, buf
, off
+ 12345) != off
+ 12345) e(0);
1606 /* We need a default UDP SO_RCVBUF >= 12345 * 2 for this. */
1607 memset(buf
, 0, UINT16_MAX
);
1608 if (recv(fd3
, buf
, UINT16_MAX
, 0) != 12345) e(0);
1609 for (i
= 0; i
< 12345; i
++)
1610 if (buf
[i
] != 1 + (i
% 251)) e(0);
1612 memset(buf
, 0, UINT16_MAX
);
1613 if (recv(fd3
, buf
, UINT16_MAX
, MSG_DONTWAIT
) != 12345) e(0);
1614 for (i
= 0; i
< 12345; i
++)
1615 if (buf
[i
] != 1 + (i
% 251)) e(0);
1617 if (recv(fd3
, buf
, UINT16_MAX
, MSG_DONTWAIT
) != -1) e(0);
1618 if (errno
!= EWOULDBLOCK
) e(0);
1621 * The two valid packets we sent will have been captured by our BPF
1622 * device as well, because SEESENT is enabled by default and also
1623 * applies to packets written to a BPF device. The reason for that is
1624 * that it allows tcpdump(8) to see what DHCP clients are sending, for
1625 * example. The packets we sent are accepted by the installed filter.
1627 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
1628 if (bs
.bs_capt
!= 2) e(0);
1630 /* Now that we've written data, test select once more. */
1633 if (select(fd
+ 1, NULL
, &fds
, NULL
, NULL
) != 1) e(0);
1634 if (!FD_ISSET(fd
, &fds
)) e(0);
1636 test94_cleanup(fd
, fd2
, fd3
, buf
);
1640 * Test read, write, and select operations on unconfigured devices.
1652 if ((fd
= open(_PATH_BPF
, O_RDWR
)) < 0) e(0);
1654 if (ioctl(fd
, BIOCGBLEN
, &size
) != 0) e(0);
1655 if (size
< 1024 || size
> BPF_MAXBUFSIZE
) e(0);
1657 if ((buf
= malloc(size
)) == NULL
) e(0);
1659 if (read(fd
, buf
, size
) != -1) e(0);
1660 if (errno
!= EINVAL
) e(0);
1662 if (write(fd
, buf
, size
) != -1) e(0);
1663 if (errno
!= EINVAL
) e(0);
1670 if (select(fd
+ 1, &rfds
, &wfds
, NULL
, NULL
) != 2) e(0);
1672 if (!FD_ISSET(fd
, &rfds
)) e(0);
1673 if (!FD_ISSET(fd
, &wfds
)) e(0);
1677 if (close(fd
) != 0) e(0);
1681 * Test various IOCTL calls. Several of these tests are rather superficial,
1682 * because we would need a real interface, rather than the loopback device, to
1683 * test their functionality properly. Also note that we skip various checks
1684 * performed as part of the earlier subtests.
1690 struct bpf_version bv
;
1691 struct bpf_dltlist bfl
;
1696 unsigned int uval
, list
[2];
1697 int cfd
, ufd
, fd2
, fd3
, val
;
1702 * Many IOCTLs work only on configured or only on unconfigured BPF
1703 * devices, so for convenience we create a file descriptor for each.
1705 size
= test94_setup(&cfd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
1708 if ((ufd
= open(_PATH_BPF
, O_RDWR
)) < 0) e(0);
1711 * The BIOCSBLEN value is silently corrected to fall within a valid
1712 * range, and BIOCGBLEN can be used to obtain the corrected value. We
1713 * do not know the valid range, so we use fairly extreme test values.
1716 if (ioctl(ufd
, BIOCSBLEN
, &uval
) != 0) e(0);
1718 if (ioctl(ufd
, BIOCGBLEN
, &uval
) != 0) e(0);
1719 if (uval
< sizeof(struct bpf_hdr
) || uval
> BPF_MAXBUFSIZE
) e(0);
1721 uval
= (unsigned int)-1;
1722 if (ioctl(ufd
, BIOCSBLEN
, &uval
) != 0) e(0);
1724 if (ioctl(ufd
, BIOCGBLEN
, &uval
) != 0) e(0);
1725 if (uval
< sizeof(struct bpf_hdr
) || uval
> BPF_MAXBUFSIZE
) e(0);
1728 if (ioctl(ufd
, BIOCSBLEN
, &uval
) != 0) e(0);
1730 if (ioctl(ufd
, BIOCGBLEN
, &uval
) != 0) e(0);
1731 if (uval
< sizeof(struct bpf_hdr
) || uval
> BPF_MAXBUFSIZE
) e(0);
1733 uval
= 1024; /* ..a value that should be acceptable but small */
1734 if (ioctl(ufd
, BIOCSBLEN
, &uval
) != 0) e(0);
1735 if (ioctl(ufd
, BIOCGBLEN
, &uval
) != 0) e(0);
1736 if (uval
!= 1024) e(0);
1739 * For configured devices, it is not possible to adjust the buffer size
1740 * but it is possible to obtain its size.
1742 if (ioctl(cfd
, BIOCSBLEN
, &uval
) != -1) e(0);
1743 if (errno
!= EINVAL
) e(0);
1745 if (ioctl(cfd
, BIOCGBLEN
, &uval
) != 0) e(0);
1746 if (uval
!= size
) e(0);
1749 * BIOCFLUSH resets both buffer contents and statistics.
1752 if (ioctl(cfd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
1754 test94_fill_exact(fd2
, buf
, size
, 1 /*seq*/);
1755 test94_fill_exact(fd2
, buf
, size
, 1 /*seq*/);
1756 test94_fill_exact(fd2
, buf
, size
, 1 /*seq*/);
1758 if (ioctl(cfd
, BIOCGSTATS
, &bs
) != 0) e(0);
1759 if (bs
.bs_recv
== 0) e(0);
1760 if (bs
.bs_drop
== 0) e(0);
1761 if (bs
.bs_capt
== 0) e(0);
1763 /* Do make sure that statistics are not cleared on retrieval.. */
1764 if (ioctl(cfd
, BIOCGSTATS
, &bs
) != 0) e(0);
1765 if (bs
.bs_recv
== 0) e(0);
1766 if (bs
.bs_drop
== 0) e(0);
1767 if (bs
.bs_capt
== 0) e(0);
1769 if (ioctl(cfd
, FIONREAD
, &val
) != 0) e(0);
1772 if (ioctl(cfd
, BIOCFLUSH
) != 0) e(0);
1774 /* There is a race condition for bs_recv here, so we cannot test it. */
1775 if (ioctl(cfd
, BIOCGSTATS
, &bs
) != 0) e(0);
1776 if (bs
.bs_drop
!= 0) e(0);
1777 if (bs
.bs_capt
!= 0) e(0);
1779 if (ioctl(cfd
, FIONREAD
, &val
) != 0) e(0);
1783 * Although practically useless, BIOCFLUSH works on unconfigured
1784 * devices. So does BIOCGSTATS.
1786 if (ioctl(ufd
, BIOCFLUSH
) != 0) e(0);
1788 if (ioctl(ufd
, BIOCGSTATS
, &bs
) != 0) e(0);
1789 if (bs
.bs_recv
!= 0) e(0);
1790 if (bs
.bs_drop
!= 0) e(0);
1791 if (bs
.bs_capt
!= 0) e(0);
1794 * BIOCPROMISC works on configured devices only. On loopback devices
1795 * it has no observable effect though.
1797 if (ioctl(ufd
, BIOCPROMISC
) != -1) e(0);
1798 if (errno
!= EINVAL
) e(0);
1800 if (ioctl(cfd
, BIOCPROMISC
) != 0) e(0);
1803 * BIOCGDLT does not work on unconfigured devices.
1805 if (ioctl(ufd
, BIOCGDLT
, &uval
) != -1) e(0);
1806 if (errno
!= EINVAL
) e(0);
1809 * BIOCGETIF works only on configured devices, where it returns the
1810 * associated device name.
1812 if (ioctl(ufd
, BIOCGETIF
, &ifr
) != -1) e(0);
1813 if (errno
!= EINVAL
) e(0);
1815 memset(&ifr
, 'X', sizeof(ifr
));
1816 if (ioctl(cfd
, BIOCGETIF
, &ifr
) != 0) e(0);
1817 if (strcmp(ifr
.ifr_name
, LOOPBACK_IFNAME
) != 0) e(0);
1820 * BIOCSETIF works only on unconfigured devices, and accepts only valid
1821 * valid interface names. The name is forced to be null terminated.
1823 memset(&ifr
, 0, sizeof(ifr
));
1824 strlcpy(ifr
.ifr_name
, LOOPBACK_IFNAME
, sizeof(ifr
.ifr_name
));
1825 if (ioctl(cfd
, BIOCSETIF
, &ifr
) != -1) e(0);
1826 if (errno
!= EINVAL
) e(0);
1828 memset(&ifr
, 0, sizeof(ifr
));
1829 memset(ifr
.ifr_name
, 'x', sizeof(ifr
.ifr_name
));
1830 if (ioctl(ufd
, BIOCSETIF
, &ifr
) != -1) e(0);
1831 if (errno
!= ENXIO
) e(0);
1833 /* Anyone that has ten loopback devices is simply insane. */
1834 memset(&ifr
, 0, sizeof(ifr
));
1835 strlcpy(ifr
.ifr_name
, LOOPBACK_IFNAME
, sizeof(ifr
.ifr_name
));
1836 ifr
.ifr_name
[strlen(ifr
.ifr_name
) - 1] += 9;
1837 if (ioctl(ufd
, BIOCSETIF
, &ifr
) != -1) e(0);
1838 if (errno
!= ENXIO
) e(0);
1841 * It is possible to turn BIOCIMMEDIATE on and off. We already enabled
1842 * it a bit higher up. Note that our implementation does not support
1843 * toggling the setting while a read call is no progress, and toggling
1844 * the setting will have no effect while a select call is in progress;
1845 * similar restrictions apply to effectively all relevant settings.
1846 * Either way we do not test that here either.
1848 test94_add_random(fd2
, buf
, size
, 1 /*seq*/);
1850 if (ioctl(cfd
, FIONREAD
, &val
) != 0) e(0);
1854 if (ioctl(cfd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
1856 if (ioctl(cfd
, FIONREAD
, &val
) != 0) e(0);
1860 if (ioctl(cfd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
1862 if (ioctl(cfd
, FIONREAD
, &val
) != 0) e(0);
1865 if (ioctl(cfd
, BIOCFLUSH
) != 0) e(0);
1868 * BIOCIMMEDIATE also works on unconfigured devices.
1871 if (ioctl(ufd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
1874 if (ioctl(ufd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
1877 * BIOCVERSION should return the current BPF interface version.
1879 if (ioctl(ufd
, BIOCVERSION
, &bv
) != 0) e(0);
1880 if (bv
.bv_major
!= BPF_MAJOR_VERSION
) e(0);
1881 if (bv
.bv_minor
!= BPF_MINOR_VERSION
) e(0);
1884 * BIOCSHDRCMPLT makes sense only for devices with data link headers,
1885 * which rules out loopback devices. Check the default and test
1886 * toggling it, and stop there.
1888 /* The default value is off. */
1890 if (ioctl(ufd
, BIOCGHDRCMPLT
, &uval
) != 0) e(0);
1891 if (uval
!= 0) e(0);
1894 if (ioctl(ufd
, BIOCSHDRCMPLT
, &uval
) != 0) e(0);
1896 if (ioctl(ufd
, BIOCGHDRCMPLT
, &uval
) != 0) e(0);
1897 if (uval
!= 1) e(0);
1900 if (ioctl(ufd
, BIOCSHDRCMPLT
, &uval
) != 0) e(0);
1903 if (ioctl(ufd
, BIOCGHDRCMPLT
, &uval
) != 0) e(0);
1904 if (uval
!= 0) e(0);
1907 * BIOCSDLT works on configured devices. For loopback devices, it can
1908 * only set the data link type to its current value, which on MINIX3
1909 * for loopback devices is DLT_RAW (i.e., no headers at all).
1912 if (ioctl(ufd
, BIOCSDLT
, &uval
) != -1) e(0);
1913 if (errno
!= EINVAL
) e(0);
1916 if (ioctl(cfd
, BIOCSDLT
, &uval
) != 0) e(0);
1919 if (ioctl(cfd
, BIOCSDLT
, &uval
) != -1) e(0);
1920 if (errno
!= EINVAL
) e(0);
1922 if (ioctl(cfd
, BIOCGDLT
, &uval
) != 0) e(0);
1923 if (uval
!= DLT_RAW
) e(0);
1926 * BIOCGDLTLIST works on configured devices only, and may be used to
1927 * both query the size of the list and obtain the list. On MINIX3,
1928 * loopback devices will only ever return DLT_RAW. Unfortunately,
1929 * much of the handling for this IOCTL is in libc for us, which is also
1930 * why we do not test bad pointers and stuff like that.
1932 memset(&bfl
, 0, sizeof(bfl
));
1933 if (ioctl(ufd
, BIOCGDLTLIST
, &bfl
) != -1) e(0);
1934 if (errno
!= EINVAL
) e(0);
1936 memset(&bfl
, 0, sizeof(bfl
));
1937 if (ioctl(cfd
, BIOCGDLTLIST
, &bfl
) != 0) e(0);
1938 if (bfl
.bfl_len
!= 1) e(0);
1939 if (bfl
.bfl_list
!= NULL
) e(0);
1941 memset(&bfl
, 0, sizeof(bfl
));
1942 bfl
.bfl_len
= 2; /* should be ignored */
1943 if (ioctl(cfd
, BIOCGDLTLIST
, &bfl
) != 0) e(0);
1944 if (bfl
.bfl_len
!= 1) e(0);
1945 if (bfl
.bfl_list
!= NULL
) e(0);
1947 memset(&bfl
, 0, sizeof(bfl
));
1948 memset(list
, 0, sizeof(list
));
1949 bfl
.bfl_list
= list
;
1950 if (ioctl(cfd
, BIOCGDLTLIST
, &bfl
) != -1) e(0);
1951 if (errno
!= ENOMEM
) e(0);
1952 if (list
[0] != 0) e(0);
1954 memset(&bfl
, 0, sizeof(bfl
));
1956 bfl
.bfl_list
= list
;
1957 if (ioctl(cfd
, BIOCGDLTLIST
, &bfl
) != 0) e(0);
1958 if (bfl
.bfl_len
!= 1) e(0);
1959 if (bfl
.bfl_list
!= list
) e(0);
1960 if (list
[0] != DLT_RAW
) e(0);
1961 if (list
[1] != 0) e(0);
1963 memset(&bfl
, 0, sizeof(bfl
));
1964 memset(list
, 0, sizeof(list
));
1966 bfl
.bfl_list
= list
;
1967 if (ioctl(cfd
, BIOCGDLTLIST
, &bfl
) != 0) e(0);
1968 if (bfl
.bfl_len
!= 1) e(0);
1969 if (bfl
.bfl_list
!= list
) e(0);
1970 if (list
[0] != DLT_RAW
) e(0);
1971 if (list
[1] != 0) e(0);
1974 * For loopback devices, BIOCSSEESENT is a bit weird: packets are
1975 * captured on output to get a complete view of loopback traffic, and
1976 * not also on input because that would then duplicate the traffic. As
1977 * a result, turning off BIOCSSEESENT for a loopback device means that
1978 * no packets will be captured at all anymore. First test the default
1979 * and toggling on the unconfigured device, then reproduce the above on
1980 * the configured device.
1982 /* The default value is on. */
1984 if (ioctl(ufd
, BIOCGSEESENT
, &uval
) != 0) e(0);
1985 if (uval
!= 1) e(0);
1988 if (ioctl(ufd
, BIOCSSEESENT
, &uval
) != 0) e(0);
1991 if (ioctl(ufd
, BIOCGSEESENT
, &uval
) != 0) e(0);
1992 if (uval
!= 0) e(0);
1995 if (ioctl(ufd
, BIOCSSEESENT
, &uval
) != 0) e(0);
1997 if (ioctl(ufd
, BIOCGSEESENT
, &uval
) != 0) e(0);
1998 if (uval
!= 1) e(0);
2000 if (ioctl(cfd
, BIOCGSEESENT
, &uval
) != 0) e(0);
2001 if (uval
!= 1) e(0);
2004 if (ioctl(cfd
, BIOCSSEESENT
, &uval
) != 0) e(0);
2006 if (ioctl(cfd
, BIOCFLUSH
) != 0) e(0);
2008 test94_add_random(fd2
, buf
, size
, 1 /*seq*/);
2010 if (ioctl(cfd
, BIOCGSTATS
, &bs
) != 0) e(0);
2011 if (bs
.bs_recv
!= 0) e(0);
2014 if (ioctl(cfd
, BIOCSSEESENT
, &uval
) != 0) e(0);
2016 if (ioctl(cfd
, BIOCFLUSH
) != 0) e(0);
2018 test94_add_random(fd2
, buf
, size
, 1 /*seq*/);
2020 if (ioctl(cfd
, BIOCGSTATS
, &bs
) != 0) e(0);
2021 if (bs
.bs_recv
== 0) e(0);
2024 * The BIOCSRTIMEOUT values are rounded up to clock granularity.
2025 * Invalid timeout values are rejected.
2027 /* The default value is zero. */
2029 if (ioctl(ufd
, BIOCGRTIMEOUT
, &tv
) != 0) e(0);
2030 if (tv
.tv_sec
!= 0) e(0);
2031 if (tv
.tv_usec
!= 0) e(0);
2033 tv
.tv_usec
= 1000000;
2034 if (ioctl(ufd
, BIOCSRTIMEOUT
, &tv
) != -1) e(0);
2035 if (errno
!= EINVAL
) e(0);
2038 if (ioctl(ufd
, BIOCSRTIMEOUT
, &tv
) != -1) e(0);
2039 if (errno
!= EINVAL
) e(0);
2043 if (ioctl(ufd
, BIOCSRTIMEOUT
, &tv
) != -1) e(0);
2044 if (errno
!= EINVAL
) e(0);
2046 tv
.tv_sec
= INT_MAX
;
2047 if (ioctl(ufd
, BIOCSRTIMEOUT
, &tv
) != -1) e(0);
2048 if (errno
!= EDOM
) e(0);
2050 if (ioctl(ufd
, BIOCGRTIMEOUT
, &tv
) != 0) e(0);
2051 if (tv
.tv_sec
!= 0) e(0);
2052 if (tv
.tv_usec
!= 0) e(0);
2056 if (ioctl(ufd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
2058 if (ioctl(ufd
, BIOCGRTIMEOUT
, &tv
) != 0) e(0);
2059 if (tv
.tv_sec
!= 123) e(0);
2060 if (tv
.tv_usec
== 0) e(0); /* rounding should be up */
2064 if (ioctl(ufd
, BIOCSRTIMEOUT
, &tv
) != 0) e(0);
2066 if (ioctl(ufd
, BIOCGRTIMEOUT
, &tv
) != 0) e(0);
2067 if (tv
.tv_sec
!= 0) e(0);
2068 if (tv
.tv_usec
!= 0) e(0);
2071 * BIOCSFEEDBACK is another weird setting for which we only test
2072 * default and toggling here.
2074 /* The default value is off. */
2076 if (ioctl(ufd
, BIOCGFEEDBACK
, &uval
) != 0) e(0);
2077 if (uval
!= 0) e(0);
2080 if (ioctl(ufd
, BIOCSFEEDBACK
, &uval
) != 0) e(0);
2082 if (ioctl(ufd
, BIOCGFEEDBACK
, &uval
) != 0) e(0);
2083 if (uval
!= 1) e(0);
2086 if (ioctl(ufd
, BIOCSFEEDBACK
, &uval
) != 0) e(0);
2089 if (ioctl(ufd
, BIOCGFEEDBACK
, &uval
) != 0) e(0);
2090 if (uval
!= 0) e(0);
2093 if (close(ufd
) != 0) e(0);
2095 test94_cleanup(cfd
, fd2
, fd3
, buf
);
2098 /* IPv6 version of our filter. */
2099 static struct bpf_insn test94_filter6
[] = {
2100 { BPF_LD
+BPF_B
+BPF_ABS
, 0, 0, 0 }, /* is this an IPv6 header? */
2101 { BPF_ALU
+BPF_RSH
+BPF_K
, 0, 0, 4 },
2102 { BPF_JMP
+BPF_JEQ
+BPF_K
, 0, 6, 6 },
2103 { BPF_LD
+BPF_B
+BPF_ABS
, 0, 0, 6 }, /* is this a UDP packet? */
2104 { BPF_JMP
+BPF_JEQ
+BPF_K
, 0, 4, IPPROTO_UDP
},
2105 { BPF_LD
+BPF_H
+BPF_ABS
, 0, 0, 40 }, /* source port 12345? */
2106 { BPF_JMP
+BPF_JEQ
+BPF_K
, 0, 2, TEST_PORT_A
},
2107 { BPF_LD
+BPF_H
+BPF_ABS
, 0, 0, 42 }, /* destination port 12346? */
2108 { BPF_JMP
+BPF_JEQ
+BPF_K
, 1, 0, TEST_PORT_B
},
2109 { BPF_RET
+BPF_K
, 0, 0, 0 }, /* reject the packet */
2110 { BPF_RET
+BPF_K
, 0, 0, (uint32_t)-1 }, /* accept the (whole) packet */
/*
 * Test receipt of IPv6 packets, because it was getting a bit messy to
 * integrate that into the previous subtests.  We just want to make sure that
 * IPv6 packets are properly filtered and captured at all.  The rest of the
 * code is entirely version agnostic anyway.
 */
2122 struct sockaddr_in6 sin6A
, sin6B
;
2123 struct bpf_program bf
;
2133 unsigned int uval
, size
, dlt
;
2138 if ((fd
= open(_PATH_BPF
, O_RDWR
)) < 0) e(0);
2140 if (ioctl(fd
, BIOCGBLEN
, &size
) != 0) e(0);
2141 if (size
< 1024 || size
> BPF_MAXBUFSIZE
) e(0);
2143 if ((buf
= malloc(size
)) == NULL
) e(0);
2145 /* Install the filter. */
2146 memset(&bf
, 0, sizeof(bf
));
2147 bf
.bf_len
= __arraycount(test94_filter6
);
2148 bf
.bf_insns
= test94_filter6
;
2149 if (ioctl(fd
, BIOCSETF
, &bf
) != 0) e(0);
2152 if (ioctl(fd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
2154 /* Bind to the loopback device. */
2155 memset(&ifr
, 0, sizeof(ifr
));
2156 strlcpy(ifr
.ifr_name
, LOOPBACK_IFNAME
, sizeof(ifr
.ifr_name
));
2157 if (ioctl(fd
, BIOCSETIF
, &ifr
) != 0) e(0);
2160 * If the loopback device's data link type is not DLT_RAW, our filter
2161 * and size calculations will not work.
2163 if (ioctl(fd
, BIOCGDLT
, &dlt
) != 0) e(0);
2164 if (dlt
!= DLT_RAW
) e(0);
2166 /* We use UDP traffic for our test packets. */
2167 if ((fd2
= socket(AF_INET6
, SOCK_DGRAM
, 0)) < 0) e(0);
2169 memset(&sin6A
, 0, sizeof(sin6A
));
2170 sin6A
.sin6_family
= AF_INET6
;
2171 sin6A
.sin6_port
= htons(TEST_PORT_A
);
2172 memcpy(&sin6A
.sin6_addr
, &in6addr_loopback
, sizeof(sin6A
.sin6_addr
));
2173 if (bind(fd2
, (struct sockaddr
*)&sin6A
, sizeof(sin6A
)) != 0) e(0);
2175 memcpy(&sin6B
, &sin6A
, sizeof(sin6B
));
2176 sin6B
.sin6_port
= htons(TEST_PORT_B
);
2177 if (connect(fd2
, (struct sockaddr
*)&sin6B
, sizeof(sin6B
)) != 0) e(0);
2179 if ((fd3
= socket(AF_INET6
, SOCK_DGRAM
, 0)) < 0) e(0);
2181 if (bind(fd3
, (struct sockaddr
*)&sin6B
, sizeof(sin6B
)) != 0) e(0);
2183 if (connect(fd3
, (struct sockaddr
*)&sin6A
, sizeof(sin6A
)) != 0) e(0);
2185 if (write(fd2
, "A", 1) != 1) e(0);
2187 if (read(fd3
, &c
, 1) != 1) e(0);
2190 if (write(fd3
, "B", 1) != 1) e(0);
2192 if (read(fd2
, &c
, 1) != 1) e(0);
2195 if (ioctl(fd
, BIOCGSTATS
, &bs
) != 0) e(0);
2196 if (bs
.bs_recv
< 2) e(0);
2197 if (bs
.bs_capt
!= 1) e(0);
2198 if (bs
.bs_drop
!= 0) e(0);
2200 memset(buf
, 0, size
);
2202 len
= read(fd
, buf
, size
);
2204 if (len
!= BPF_WORDALIGN(sizeof(bh
)) +
2205 BPF_WORDALIGN(sizeof(ip6
) + sizeof(uh
) + 1)) e(0);
2207 memcpy(&bh
, buf
, sizeof(bh
));
2209 if (bh
.bh_tstamp
.tv_sec
== 0 && bh
.bh_tstamp
.tv_usec
== 0) e(0);
2210 if (bh
.bh_caplen
!= sizeof(ip6
) + sizeof(uh
) + 1) e(0);
2211 if (bh
.bh_datalen
!= bh
.bh_caplen
) e(0);
2212 if (bh
.bh_hdrlen
!= BPF_WORDALIGN(sizeof(bh
))) e(0);
2214 if (buf
[bh
.bh_hdrlen
+ sizeof(ip6
) + sizeof(uh
)] != 'A') e(0);
2217 * Finally, do a quick test to see if we can send IPv6 packets by
2218 * writing to the BPF device. We rely on such packets being generated
2219 * properly in a later test.
2221 off
= test94_make_pkt(buf
, 6, 1 /*v6*/);
2222 memcpy(buf
+ off
, "Hello!", 6);
2224 if (write(fd
, buf
, off
+ 6) != off
+ 6) e(0);
2226 socklen
= sizeof(sin6A
);
2227 if (recvfrom(fd3
, buf
, size
, 0, (struct sockaddr
*)&sin6A
,
2228 &socklen
) != 6) e(0);
2230 if (memcmp(buf
, "Hello!", 6) != 0) e(0);
2231 if (socklen
!= sizeof(sin6A
)) e(0);
2232 if (sin6A
.sin6_family
!= AF_INET6
) e(0);
2233 if (sin6A
.sin6_port
!= htons(TEST_PORT_A
)) e(0);
2234 if (memcmp(&sin6A
.sin6_addr
, &in6addr_loopback
,
2235 sizeof(sin6A
.sin6_addr
)) != 0) e(0);
2239 if (close(fd3
) != 0) e(0);
2241 if (close(fd2
) != 0) e(0);
2243 if (close(fd
) != 0) e(0);
/*
 * Test the BPF sysctl(7) interface at a basic level.
 */
2252 struct bpf_stat bs1
, bs2
;
2253 struct bpf_d_ext
*bde
;
2255 unsigned int slot
, count
, uval
;
2256 size_t len
, oldlen
, size
, bdesize
;
2257 int fd
, fd2
, fd3
, val
, mib
[5], smib
[3], found
;
2262 * Obtain the maximum buffer size. The value must be sane.
2264 memset(mib
, 0, sizeof(mib
));
2265 len
= __arraycount(mib
);
2266 if (sysctlnametomib("net.bpf.maxbufsize", mib
, &len
) != 0) e(0);
2269 oldlen
= sizeof(val
);
2270 if (sysctl(mib
, len
, &val
, &oldlen
, NULL
, 0) != 0) e(0);
2271 if (oldlen
!= sizeof(val
)) e(0);
2273 if (val
< 1024 || val
> INT_MAX
/ 2) e(0);
2276 * Attempt to set the maximum buffer size. This is not (yet) supported
2277 * so for now we want to make sure that it really does not work.
2279 if (sysctl(mib
, len
, NULL
, NULL
, &val
, sizeof(val
)) != -1) e(0);
2280 if (errno
!= EPERM
) e(0);
2283 * Obtain global statistics. We check the actual statistics later on.
2285 memset(smib
, 0, sizeof(smib
));
2286 len
= __arraycount(smib
);
2287 if (sysctlnametomib("net.bpf.stats", smib
, &len
) != 0) e(0);
2290 oldlen
= sizeof(bs1
);
2291 if (sysctl(smib
, len
, &bs1
, &oldlen
, NULL
, 0) != 0) e(0);
2292 if (oldlen
!= sizeof(bs1
)) e(0);
2295 * Set up a BPF descriptor, and retrieve the list of BPF peers. We
2296 * should be able to find our BPF peer.
2298 memset(mib
, 0, sizeof(mib
));
2299 len
= __arraycount(mib
);
2300 if (sysctlnametomib("net.bpf.peers", mib
, &len
) != 0) e(0);
2302 mib
[len
++] = sizeof(*bde
); /* size of each element */
2303 mib
[len
++] = INT_MAX
; /* limit on elements to return */
2305 size
= test94_setup(&fd
, &fd2
, &fd3
, &buf
, 0 /*size*/,
2308 /* Generate some traffic to bump the statistics. */
2309 count
= test94_fill_exact(fd2
, buf
, size
, 0);
2310 test94_fill_exact(fd2
, buf
, size
, 0);
2311 test94_fill_exact(fd2
, buf
, size
, 0);
2313 if (write(fd3
, "X", 1) != 1) e(0);
2315 if (sysctl(mib
, len
, NULL
, &oldlen
, NULL
, 0) != 0) e(0);
2316 if (oldlen
== 0) e(0);
2318 /* Add some slack space ourselves to prevent problems with churn. */
2319 bdesize
= oldlen
+ sizeof(*bde
) * 8;
2320 if ((bde
= malloc(bdesize
)) == NULL
) e(0);
2323 if (sysctl(mib
, len
, bde
, &oldlen
, NULL
, 0) != 0) e(0);
2324 if (oldlen
% sizeof(*bde
)) e(0);
2327 for (slot
= 0; slot
< oldlen
/ sizeof(*bde
); slot
++) {
2328 if (bde
[slot
].bde_pid
!= getpid())
2331 if (bde
[slot
].bde_bufsize
!= size
) e(0);
2332 if (bde
[slot
].bde_promisc
!= 0) e(0);
2333 if (bde
[slot
].bde_state
!= BPF_IDLE
) e(0);
2334 if (bde
[slot
].bde_immediate
!= 0) e(0);
2335 if (bde
[slot
].bde_hdrcmplt
!= 0) e(0);
2336 if (bde
[slot
].bde_seesent
!= 1) e(0);
2337 if (bde
[slot
].bde_rcount
< count
* 3 + 1) e(0);
2338 if (bde
[slot
].bde_dcount
!= count
) e(0);
2339 if (bde
[slot
].bde_ccount
!= count
* 3) e(0);
2340 if (strcmp(bde
[slot
].bde_ifname
, LOOPBACK_IFNAME
) != 0) e(0);
2344 if (found
!= 1) e(0);
2347 * If global statistics are an accumulation of individual devices'
2348 * statistics (they currently are not) then such a scheme should take
2349 * into account device flushes.
2351 if (ioctl(fd
, BIOCFLUSH
) != 0) e(0);
2353 test94_cleanup(fd
, fd2
, fd3
, buf
);
2356 * Now see if the global statistics have indeed changed correctly.
2358 oldlen
= sizeof(bs2
);
2359 if (sysctl(smib
, __arraycount(smib
), &bs2
, &oldlen
, NULL
, 0) != 0)
2361 if (oldlen
!= sizeof(bs2
)) e(0);
2363 if (bs2
.bs_recv
< bs1
.bs_recv
+ count
* 3 + 1) e(0);
2364 if (bs2
.bs_drop
!= bs1
.bs_drop
+ count
) e(0);
2365 if (bs2
.bs_capt
!= bs1
.bs_capt
+ count
* 3) e(0);
2368 * Check an unconfigured BPF device as well.
2370 if ((fd
= open(_PATH_BPF
, O_RDWR
)) < 0) e(0);
2373 * Toggle some flags. It is too much effort to test them all
2374 * individually (which, in the light of copy-paste mistakes, would be
2375 * the right thing to do) but at least we'll know something gets set.
2378 if (ioctl(fd
, BIOCIMMEDIATE
, &uval
) != 0) e(0);
2379 if (ioctl(fd
, BIOCSHDRCMPLT
, &uval
) != 0) e(0);
2382 if (ioctl(fd
, BIOCSSEESENT
, &uval
) != 0) e(0);
2385 if (sysctl(mib
, len
, bde
, &oldlen
, NULL
, 0) != 0) e(0);
2386 if (oldlen
% sizeof(*bde
)) e(0);
2389 for (slot
= 0; slot
< oldlen
/ sizeof(*bde
); slot
++) {
2390 if (bde
[slot
].bde_pid
!= getpid())
2393 if (bde
[slot
].bde_bufsize
!= size
) e(0);
2394 if (bde
[slot
].bde_promisc
!= 0) e(0);
2395 if (bde
[slot
].bde_state
!= BPF_IDLE
) e(0);
2396 if (bde
[slot
].bde_immediate
!= 1) e(0);
2397 if (bde
[slot
].bde_hdrcmplt
!= 1) e(0);
2398 if (bde
[slot
].bde_seesent
!= 0) e(0);
2399 if (bde
[slot
].bde_rcount
!= 0) e(0);
2400 if (bde
[slot
].bde_dcount
!= 0) e(0);
2401 if (bde
[slot
].bde_ccount
!= 0) e(0);
2402 if (bde
[slot
].bde_ifname
[0] != '\0') e(0);
2406 if (found
!= 1) e(0);
2411 * At this point there should be no BPF device left for our PID.
2414 if (sysctl(mib
, len
, bde
, &oldlen
, NULL
, 0) != 0) e(0);
2415 if (oldlen
% sizeof(*bde
)) e(0);
2417 for (slot
= 0; slot
< oldlen
/ sizeof(*bde
); slot
++)
2418 if (bde
[slot
].bde_pid
== getpid()) e(0);
2425 * Test privileged operations as an unprivileged caller.
2442 if ((pw
= getpwnam(NONROOT_USER
)) == NULL
) e(0);
2444 if (setuid(pw
->pw_uid
) != 0) e(0);
2447 * Opening /dev/bpf must fail. Note that this is a system
2448 * configuration issue rather than a LWIP service issue.
2450 if (open(_PATH_BPF
, O_RDWR
) != -1) e(0);
2451 if (errno
!= EACCES
) e(0);
2454 * Retrieving the net.bpf.peers list must fail, too.
2456 memset(mib
, 0, sizeof(mib
));
2457 len
= __arraycount(mib
);
2458 if (sysctlnametomib("net.bpf.peers", mib
, &len
) != 0) e(0);
2460 mib
[len
++] = sizeof(struct bpf_d_ext
);
2461 mib
[len
++] = INT_MAX
;
2463 if (sysctl(mib
, len
, NULL
, &oldlen
, NULL
, 0) != -1) e(0);
2464 if (errno
!= EPERM
) e(0);
2475 if (wait(&status
) != pid
) e(0);
2476 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) e(0);
2480 * Test that traffic directed to loopback addresses be dropped on non-loopback
2481 * interfaces. In particular, inbound traffic to 127.0.0.1 and ::1 should not
2482 * be accepted on any interface that does not own those addresses. This test
2483 * is here because BPF feedback mode is (currently) the only way in which we
2484 * can generate inbound traffic the ethernet level, and even then only as a
2485 * side effect of sending outbound traffic. That is: this test sends the same
2486 * test packets to the local network! As such it must be performed only when
2487 * USENETWORK=yes and therefore at the user's risk.
2492 struct sockaddr_in sin
;
2493 struct sockaddr_in6 sin6
;
2494 struct sockaddr_dl sdl
;
2496 struct ifaddrs
*ifa
, *ifp
;
2497 struct if_data
*ifdata
;
2498 uint8_t buf
[sizeof(struct ether_header
) + MAX(sizeof(struct ip
),
2499 sizeof(struct ip6_hdr
)) + sizeof(struct udphdr
) + 6];
2500 struct ether_header ether
;
2501 const uint8_t ether_src
[ETHER_ADDR_LEN
] =
2502 { 0x02, 0x00, 0x01, 0x12, 0x34, 0x56 };
2509 if (!get_setting_use_network())
2512 memset(&ifr
, 0, sizeof(ifr
));
2513 memset(ðer
, 0, sizeof(ether
));
2516 * Start by finding a suitable ethernet interface that is up and of
2517 * which the link is not down. Without one, we cannot perform this
2518 * test. Save the interface name and the ethernet address.
2520 if (getifaddrs(&ifa
) != 0) e(0);
2522 for (ifp
= ifa
; ifp
!= NULL
; ifp
= ifp
->ifa_next
) {
2523 if (!(ifp
->ifa_flags
& IFF_UP
) || ifp
->ifa_addr
== NULL
||
2524 ifp
->ifa_addr
->sa_family
!= AF_LINK
)
2527 ifdata
= (struct if_data
*)ifp
->ifa_data
;
2528 if (ifdata
!= NULL
&& ifdata
->ifi_type
== IFT_ETHER
&&
2529 ifdata
->ifi_link_state
!= LINK_STATE_DOWN
) {
2530 strlcpy(ifr
.ifr_name
, ifp
->ifa_name
,
2531 sizeof(ifr
.ifr_name
));
2533 memcpy(&sdl
, (struct sockaddr_dl
*)ifp
->ifa_addr
,
2534 offsetof(struct sockaddr_dl
, sdl_data
));
2535 if (sdl
.sdl_alen
!= sizeof(ether
.ether_dhost
)) e(0);
2536 memcpy(ether
.ether_dhost
,
2537 ((struct sockaddr_dl
*)ifp
->ifa_addr
)->sdl_data
+
2538 sdl
.sdl_nlen
, sdl
.sdl_alen
);
2548 /* Open a BPF device and bind it to the ethernet interface we found. */
2549 if ((bfd
= open(_PATH_BPF
, O_RDWR
)) < 0) e(0);
2551 if (ioctl(bfd
, BIOCSETIF
, &ifr
) != 0) e(0);
2553 if (ioctl(bfd
, BIOCGDLT
, &val
) != 0) e(0);
2554 if (val
!= DLT_EN10MB
) e(0);
2557 if (ioctl(bfd
, BIOCSFEEDBACK
, &val
) != 0) e(0);
2559 /* We use UDP traffic for our test packets, IPv4 first. */
2560 if ((sfd
= socket(AF_INET
, SOCK_DGRAM
, 0)) < 0) e(0);
2562 memset(&sin
, 0, sizeof(sin
));
2563 sin
.sin_family
= AF_INET
;
2564 sin
.sin_port
= htons(TEST_PORT_B
);
2565 sin
.sin_addr
.s_addr
= htonl(INADDR_LOOPBACK
);
2566 if (bind(sfd
, (struct sockaddr
*)&sin
, sizeof(sin
)) != 0) e(0);
2569 * Construct and send a packet. We already filled in the ethernet
2570 * destination address. Put in a source address that is locally
2571 * administered but valid (and as such no reason for packet rejection).
2573 memcpy(ether
.ether_shost
, ether_src
, sizeof(ether
.ether_shost
));
2574 ether
.ether_type
= htons(ETHERTYPE_IP
);
2576 memcpy(buf
, ðer
, sizeof(ether
));
2577 off
= sizeof(ether
);
2578 off
+= test94_make_pkt(buf
+ off
, 6, 0 /*v6*/);
2579 if (off
+ 6 > sizeof(buf
)) e(0);
2580 memcpy(buf
+ off
, "Hello!", 6);
2582 if (write(bfd
, buf
, off
+ 6) != off
+ 6) e(0);
2584 /* The packet MUST NOT arrive. */
2585 if (recv(sfd
, buf
, sizeof(buf
), MSG_DONTWAIT
) != -1) e(0);
2586 if (errno
!= EWOULDBLOCK
) e(0);
2588 if (close(sfd
) != 0) e(0);
2590 /* Try the same thing, but now with an IPv6 packet. */
2591 if ((sfd
= socket(AF_INET6
, SOCK_DGRAM
, 0)) < 0) e(0);
2593 memset(&sin6
, 0, sizeof(sin6
));
2594 sin6
.sin6_family
= AF_INET6
;
2595 sin6
.sin6_port
= htons(TEST_PORT_B
);
2596 memcpy(&sin6
.sin6_addr
, &in6addr_loopback
, sizeof(sin6
.sin6_addr
));
2597 if (bind(sfd
, (struct sockaddr
*)&sin6
, sizeof(sin6
)) != 0) e(0);
2599 ether
.ether_type
= htons(ETHERTYPE_IPV6
);
2601 memcpy(buf
, ðer
, sizeof(ether
));
2602 off
= sizeof(ether
);
2603 off
+= test94_make_pkt(buf
+ off
, 6, 1 /*v6*/);
2604 if (off
+ 6 > sizeof(buf
)) e(0);
2605 memcpy(buf
+ off
, "Hello!", 6);
2607 if (write(bfd
, buf
, off
+ 6) != off
+ 6) e(0);
2609 if (recv(sfd
, buf
, sizeof(buf
), MSG_DONTWAIT
) != -1) e(0);
2610 if (errno
!= EWOULDBLOCK
) e(0);
2612 if (close(sfd
) != 0) e(0);
2613 if (close(bfd
) != 0) e(0);
2617 * Test program for LWIP BPF.
2620 main(int argc
, char ** argv
)
2626 srand48(time(NULL
));
2633 for (i
= 0; i
< ITERATIONS
; i
++) {
2634 if (m
& 0x001) test94a();
2635 if (m
& 0x002) test94b();
2636 if (m
& 0x004) test94c();
2637 if (m
& 0x008) test94d();
2638 if (m
& 0x010) test94e();
2639 if (m
& 0x020) test94f();
2640 if (m
& 0x040) test94g();
2641 if (m
& 0x080) test94h();
2642 if (m
& 0x100) test94i();
2643 if (m
& 0x200) test94j();
2644 if (m
& 0x400) test94k();
2645 if (m
& 0x800) test94l();