/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

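/* Allocate a zeroed, page-aligned program image of at least @size bytes
 * together with its bpf_prog_aux bookkeeping structure. Returns NULL if
 * either allocation fails.
 */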
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

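/* Grow an existing program to a larger, page-rounded size. The extra pages
 * are charged to the owning user, the old image is copied over and then
 * freed, and fp_old->aux is handed over to the new program. Returns fp_old
 * unchanged if it already has enough pages, or NULL on failure.
 */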
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

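/* Compute a SHA-1 digest over the instruction image, with the immediates of
 * map fd loads zeroed out first since fd numbers are not stable across
 * program loads. The big-endian result is stored in fp->digest.
 */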
int bpf_prog_calc_digest(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_digest_scratch_size(fp);
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(fp->digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(fp->digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)fp->digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(fp->digest[i]);

	vfree(raw);
	return 0;
}

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}

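/* After inserting @delta instructions at position @pos, walk the program
 * and fix up the relative offsets of jumps that cross the patched region.
 */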
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}

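/* Replace the single instruction at @off with the @len instructions in
 * @patch. If the patch grows the image, the program is reallocated, the
 * tail is shifted and branch offsets are adjusted accordingly.
 */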
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}

#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

int bpf_jit_harden __read_mostly;

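/* Rewrite one instruction carrying a user-controlled immediate so that the
 * constant never appears verbatim in the JIT image: load imm ^ imm_rnd into
 * BPF_REG_AX, XOR the random value back out at runtime and use the register
 * form of the original instruction. Returns the number of instructions
 * emitted into @to_buff, or 0 if the insn needs no blinding.
 */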
static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

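/* Make a temporary copy of the program image used for constant blinding.
 * The clone shares fp_other's aux structure rather than getting its own.
 */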
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

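/* Entry point for constant blinding: if blinding is enabled, clone the
 * program and rewrite every immediate-carrying instruction through
 * bpf_jit_blind_insn()/bpf_patch_insn_single(). Returns the (possibly
 * unchanged) clone, or an ERR_PTR on allocation failure.
 */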
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled())
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

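	/* Dispatch works via computed goto: every opcode maps to a label in
	 * the jumptable above, and both CONT and CONT_JMP simply advance to
	 * the next instruction and jump back to select_insn.
	 */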
#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
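	/* The LDST() macro below expands into the STX_MEM, ST_MEM and
	 * LDX_MEM handlers for each access size (u8, u16, u32, u64).
	 */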
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
		 * only appearing in the programs where ctx ==
		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */

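/* A prog array map may only hold programs of one type and JIT-edness: the
 * first program inserted becomes the owner, later insertions must match it.
 */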
bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

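/* Verify that every prog array map used by @fp is compatible with it, so
 * that tail calls never mix incompatible program types.
 */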
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	fp = bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}