/*	$NetBSD: bpf_filter.c,v 1.70 2015/02/11 12:53:15 alnsn Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.70 2015/02/11 12:53:15 alnsn Exp $");

#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp (LBL)";
#endif

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#include <net/bpf.h>

bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
	if (nwords > BPF_MAX_MEMWORDS || (preinited >> nwords) != 0) {
		return EINVAL;
	}
	bc->extwords = nwords;
	bc->preinited = preinited;
	return 0;
}
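
/*
 * Example usage sketch: building a BPF context with one coprocessor
 * function and two external memory words, word 0 pre-initialised by
 * the caller.  The callback and its body are hypothetical, and the
 * bpf_copfunc_t signature is assumed to match the fn(bc, args, A)
 * call made from the BPF_COP/BPF_COPX cases in bpf_filter_ext().
 */
#if 0
static uint32_t
example_cop(const bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
{
	/* Whatever is returned here ends up in the accumulator. */
	return args->wirelen;
}

static const bpf_copfunc_t example_copfuncs[] = { example_cop };

static bpf_ctx_t *
example_ctx_create(void)
{
	bpf_ctx_t *bc = bpf_create();

	bpf_set_cop(bc, example_copfuncs, __arraycount(example_copfuncs));
	bpf_set_extmem(bc, 2, BPF_MEMWORD_INIT(0));
	return bc;
}
#endif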

#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#define MINDEX(len, m, k)					\
do {								\
	len = m->m_len;						\
	while (k >= len) {					\
		k -= len;					\
		m = m->m_next;					\
		if (m == NULL)					\
			return 0;				\
		len = m->m_len;					\
	}							\
} while (/*CONSTCOND*/0)

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))

uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	u_int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || (len - k) + m0->m_len < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	u_int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	u_int len;

	*err = 1;
	MINDEX(len, m, k);
	*err = 0;
	return mtod(m, u_char *)[k];
}

#endif /* !_KERNEL */

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet and buflen is the
 * amount of data present in the buffer.
 */
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};

	return bpf_filter_ext(NULL, pc, &args);
}
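
/*
 * Example usage sketch: running a contiguous packet buffer through
 * bpf_filter() with the classic "accept IPv4 over Ethernet" program.
 * The helper name is illustrative; BPF_STMT and BPF_JUMP come from
 * <net/bpf.h>.
 */
#if 0
static u_int
example_match_ipv4(const u_char *pkt, u_int pktlen)
{
	static const struct bpf_insn insns[] = {
		/* A = 16-bit ethertype at byte offset 12 */
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
		/* accept the whole packet if A == 0x0800, else reject */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),
		BPF_STMT(BPF_RET|BPF_K, 0),
	};

	/* A non-zero return is the number of bytes to accept. */
	return bpf_filter(insns, pkt, pktlen, pktlen);
}
#endif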

u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
#ifndef _KERNEL
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args_store = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = args->pkt;
#endif

	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: safe to leave memwords uninitialised, as the validation
	 * step ensures that it will not be read, if it was not written.
	 */
	A = 0;
	X = 0;

	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(NULL, f, signed_len);
}

int
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	bpf_memword_init_t *mem, invalid;
	size_t size;
	const size_t extwords = bc ? bc->extwords : 0;
	const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
#else
	const size_t memwords = BPF_MEMWORDS;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (f[len - 1].code != (BPF_RET|BPF_K) &&
	    f[len - 1].code != (BPF_RET|BPF_A)) {
		return 0;
	}

#if defined(KERNEL) || defined(_KERNEL)
	/* Note: only the pre-initialised is valid on startup */
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~preinited;
#endif

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * More strict check with actual packet length
				 * is done at run time.
				 */
				if (p->k >= memwords)
					goto out;
				/* check for current memory invalid */
				if (invalid & BPF_MEMWORD_INIT(p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= memwords)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~BPF_MEMWORD_INIT(p->k);
#endif
			break;

		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_XOR:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;

		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;

		case BPF_RET:
			break;
		case BPF_MISC:
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
#if defined(KERNEL) || defined(_KERNEL)
				if (bc == NULL || bc->copfuncs == NULL)
					goto out;
				if (BPF_MISCOP(p->code) == BPF_COP &&
				    p->k >= bc->nfuncs) {
					goto out;
				}
				break;
#else
				goto out;
#endif

	ok = 1;
 out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}
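
/*
 * Example usage sketch: validating a program against a context before
 * running it.  The program is hypothetical; it calls coprocessor
 * function 0 and stores the result in external memory word 1, which
 * bpf_validate_ext() only accepts when the context (for instance one
 * built as in the sketch near bpf_set_extmem() above) declares enough
 * copfuncs and external memory words.
 */
#if 0
static u_int
example_validate_and_run(const bpf_ctx_t *bc, bpf_args_t *args)
{
	static const struct bpf_insn insns[] = {
		BPF_STMT(BPF_MISC|BPF_COP, 0),	/* A = copfuncs[0](bc, args, A) */
		BPF_STMT(BPF_ST, 1),		/* mem[1] = A */
		BPF_STMT(BPF_RET|BPF_A, 0),	/* accept A bytes */
	};

	if (!bpf_validate_ext(bc, insns, (int)__arraycount(insns)))
		return 0;
	return bpf_filter_ext(bc, insns, args);
}
#endif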