/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "fw.h"
#include "main.h"
/* Prefix every message written to the BPF verifier log with "[nfp] " so
 * that offload-specific failures are easy to attribute to this backend.
 */
#define pr_vlog(env, fmt, ...) \
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
45 struct nfp_insn_meta
*
46 nfp_bpf_goto_meta(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
47 unsigned int insn_idx
, unsigned int n_insns
)
49 unsigned int forward
, backward
, i
;
51 backward
= meta
->n
- insn_idx
;
52 forward
= insn_idx
- meta
->n
;
54 if (min(forward
, backward
) > n_insns
- insn_idx
- 1) {
55 backward
= n_insns
- insn_idx
- 1;
56 meta
= nfp_prog_last_meta(nfp_prog
);
58 if (min(forward
, backward
) > insn_idx
&& backward
> insn_idx
) {
60 meta
= nfp_prog_first_meta(nfp_prog
);
63 if (forward
< backward
)
64 for (i
= 0; i
< forward
; i
++)
65 meta
= nfp_meta_next(meta
);
67 for (i
= 0; i
< backward
; i
++)
68 meta
= nfp_meta_prev(meta
);
74 nfp_record_adjust_head(struct nfp_app_bpf
*bpf
, struct nfp_prog
*nfp_prog
,
75 struct nfp_insn_meta
*meta
,
76 const struct bpf_reg_state
*reg2
)
78 unsigned int location
= UINT_MAX
;
81 /* Datapath usually can give us guarantees on how much adjust head
82 * can be done without the need for any checks. Optimize the simple
83 * case where there is only one adjust head by a constant.
85 if (reg2
->type
!= SCALAR_VALUE
|| !tnum_is_const(reg2
->var_off
))
86 goto exit_set_location
;
87 imm
= reg2
->var_off
.value
;
88 /* Translator will skip all checks, we need to guarantee min pkt len */
89 if (imm
> ETH_ZLEN
- ETH_HLEN
)
90 goto exit_set_location
;
91 if (imm
> (int)bpf
->adjust_head
.guaranteed_add
||
92 imm
< -bpf
->adjust_head
.guaranteed_sub
)
93 goto exit_set_location
;
95 if (nfp_prog
->adjust_head_location
) {
96 /* Only one call per program allowed */
97 if (nfp_prog
->adjust_head_location
!= meta
->n
)
98 goto exit_set_location
;
100 if (meta
->arg2
.var_off
.value
!= imm
)
101 goto exit_set_location
;
106 nfp_prog
->adjust_head_location
= location
;
110 nfp_bpf_check_call(struct nfp_prog
*nfp_prog
, struct bpf_verifier_env
*env
,
111 struct nfp_insn_meta
*meta
)
113 const struct bpf_reg_state
*reg1
= cur_regs(env
) + BPF_REG_1
;
114 const struct bpf_reg_state
*reg2
= cur_regs(env
) + BPF_REG_2
;
115 struct nfp_app_bpf
*bpf
= nfp_prog
->bpf
;
116 u32 func_id
= meta
->insn
.imm
;
120 case BPF_FUNC_xdp_adjust_head
:
121 if (!bpf
->adjust_head
.off_max
) {
122 pr_vlog(env
, "adjust_head not supported by FW\n");
125 if (!(bpf
->adjust_head
.flags
& NFP_BPF_ADJUST_HEAD_NO_META
)) {
126 pr_vlog(env
, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
130 nfp_record_adjust_head(bpf
, nfp_prog
, meta
, reg2
);
133 case BPF_FUNC_map_lookup_elem
:
134 if (!bpf
->helpers
.map_lookup
) {
135 pr_vlog(env
, "map_lookup: not supported by FW\n");
138 if (reg2
->type
!= PTR_TO_STACK
) {
140 "map_lookup: unsupported key ptr type %d\n",
144 if (!tnum_is_const(reg2
->var_off
)) {
145 pr_vlog(env
, "map_lookup: variable key pointer\n");
149 off
= reg2
->var_off
.value
+ reg2
->off
;
152 "map_lookup: unaligned stack pointer %lld\n",
157 /* Rest of the checks is only if we re-parse the same insn */
161 old_off
= meta
->arg2
.var_off
.value
+ meta
->arg2
.off
;
162 meta
->arg2_var_off
|= off
!= old_off
;
164 if (meta
->arg1
.map_ptr
!= reg1
->map_ptr
) {
165 pr_vlog(env
, "map_lookup: called for different map\n");
170 pr_vlog(env
, "unsupported function id: %d\n", func_id
);
174 meta
->func_id
= func_id
;
182 nfp_bpf_check_exit(struct nfp_prog
*nfp_prog
,
183 struct bpf_verifier_env
*env
)
185 const struct bpf_reg_state
*reg0
= cur_regs(env
) + BPF_REG_0
;
188 if (nfp_prog
->type
== BPF_PROG_TYPE_XDP
)
191 if (!(reg0
->type
== SCALAR_VALUE
&& tnum_is_const(reg0
->var_off
))) {
194 tnum_strn(tn_buf
, sizeof(tn_buf
), reg0
->var_off
);
195 pr_vlog(env
, "unsupported exit state: %d, var_off: %s\n",
200 imm
= reg0
->var_off
.value
;
201 if (nfp_prog
->type
== BPF_PROG_TYPE_SCHED_CLS
&&
202 imm
<= TC_ACT_REDIRECT
&&
203 imm
!= TC_ACT_SHOT
&& imm
!= TC_ACT_STOLEN
&&
204 imm
!= TC_ACT_QUEUED
) {
205 pr_vlog(env
, "unsupported exit state: %d, imm: %llx\n",
214 nfp_bpf_check_stack_access(struct nfp_prog
*nfp_prog
,
215 struct nfp_insn_meta
*meta
,
216 const struct bpf_reg_state
*reg
,
217 struct bpf_verifier_env
*env
)
219 s32 old_off
, new_off
;
221 if (!tnum_is_const(reg
->var_off
)) {
222 pr_vlog(env
, "variable ptr stack access\n");
226 if (meta
->ptr
.type
== NOT_INIT
)
229 old_off
= meta
->ptr
.off
+ meta
->ptr
.var_off
.value
;
230 new_off
= reg
->off
+ reg
->var_off
.value
;
232 meta
->ptr_not_const
|= old_off
!= new_off
;
234 if (!meta
->ptr_not_const
)
237 if (old_off
% 4 == new_off
% 4)
240 pr_vlog(env
, "stack access changed location was:%d is:%d\n",
246 nfp_bpf_check_ptr(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
247 struct bpf_verifier_env
*env
, u8 reg_no
)
249 const struct bpf_reg_state
*reg
= cur_regs(env
) + reg_no
;
252 if (reg
->type
!= PTR_TO_CTX
&&
253 reg
->type
!= PTR_TO_STACK
&&
254 reg
->type
!= PTR_TO_MAP_VALUE
&&
255 reg
->type
!= PTR_TO_PACKET
) {
256 pr_vlog(env
, "unsupported ptr type: %d\n", reg
->type
);
260 if (reg
->type
== PTR_TO_STACK
) {
261 err
= nfp_bpf_check_stack_access(nfp_prog
, meta
, reg
, env
);
266 if (reg
->type
== PTR_TO_MAP_VALUE
) {
267 if (is_mbpf_store(meta
)) {
268 pr_vlog(env
, "map writes not supported\n");
273 if (meta
->ptr
.type
!= NOT_INIT
&& meta
->ptr
.type
!= reg
->type
) {
274 pr_vlog(env
, "ptr type changed for instruction %d -> %d\n",
275 meta
->ptr
.type
, reg
->type
);
285 nfp_verify_insn(struct bpf_verifier_env
*env
, int insn_idx
, int prev_insn_idx
)
287 struct nfp_prog
*nfp_prog
= env
->prog
->aux
->offload
->dev_priv
;
288 struct nfp_insn_meta
*meta
= nfp_prog
->verifier_meta
;
290 meta
= nfp_bpf_goto_meta(nfp_prog
, meta
, insn_idx
, env
->prog
->len
);
291 nfp_prog
->verifier_meta
= meta
;
293 if (!nfp_bpf_supported_opcode(meta
->insn
.code
)) {
294 pr_vlog(env
, "instruction %#02x not supported\n",
299 if (meta
->insn
.src_reg
>= MAX_BPF_REG
||
300 meta
->insn
.dst_reg
>= MAX_BPF_REG
) {
301 pr_vlog(env
, "program uses extended registers - jit hardening?\n");
305 if (meta
->insn
.code
== (BPF_JMP
| BPF_CALL
))
306 return nfp_bpf_check_call(nfp_prog
, env
, meta
);
307 if (meta
->insn
.code
== (BPF_JMP
| BPF_EXIT
))
308 return nfp_bpf_check_exit(nfp_prog
, env
);
310 if (is_mbpf_load(meta
))
311 return nfp_bpf_check_ptr(nfp_prog
, meta
, env
,
313 if (is_mbpf_store(meta
))
314 return nfp_bpf_check_ptr(nfp_prog
, meta
, env
,
320 const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops
= {
321 .insn_hook
= nfp_verify_insn
,