1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2019 Intel Corporation. */
4 #include <linux/hash.h>
6 #include <linux/filter.h>
7 #include <linux/static_call.h>
/* The BPF dispatcher is a multiway branch code generator. The
 * dispatcher is a mechanism to avoid the performance penalty of an
 * indirect call, which is expensive when retpolines are enabled. A
 * dispatch client registers a BPF program into the dispatcher, and if
 * there is available room in the dispatcher a direct call to the BPF
 * program will be generated. All calls to the BPF programs called via
 * the dispatcher will then be a direct call, instead of an
 * indirect. The dispatcher hijacks a trampoline function via the
 * __fentry__ of the trampoline. The trampoline function has the
 * following signature:
 *
 * unsigned int trampoline(const void *ctx, const struct bpf_insn *insnsi,
 *			   unsigned int (*bpf_func)(const void *,
 *						    const struct bpf_insn *));
 */
25 static struct bpf_dispatcher_prog
*bpf_dispatcher_find_prog(
26 struct bpf_dispatcher
*d
, struct bpf_prog
*prog
)
30 for (i
= 0; i
< BPF_DISPATCHER_MAX
; i
++) {
31 if (prog
== d
->progs
[i
].prog
)
37 static struct bpf_dispatcher_prog
*bpf_dispatcher_find_free(
38 struct bpf_dispatcher
*d
)
40 return bpf_dispatcher_find_prog(d
, NULL
);
43 static bool bpf_dispatcher_add_prog(struct bpf_dispatcher
*d
,
44 struct bpf_prog
*prog
)
46 struct bpf_dispatcher_prog
*entry
;
51 entry
= bpf_dispatcher_find_prog(d
, prog
);
53 refcount_inc(&entry
->users
);
57 entry
= bpf_dispatcher_find_free(d
);
63 refcount_set(&entry
->users
, 1);
68 static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher
*d
,
69 struct bpf_prog
*prog
)
71 struct bpf_dispatcher_prog
*entry
;
76 entry
= bpf_dispatcher_find_prog(d
, prog
);
80 if (refcount_dec_and_test(&entry
->users
)) {
89 int __weak
arch_prepare_bpf_dispatcher(void *image
, void *buf
, s64
*funcs
, int num_funcs
)
94 static int bpf_dispatcher_prepare(struct bpf_dispatcher
*d
, void *image
, void *buf
)
96 s64 ips
[BPF_DISPATCHER_MAX
] = {}, *ipsp
= &ips
[0];
99 for (i
= 0; i
< BPF_DISPATCHER_MAX
; i
++) {
100 if (d
->progs
[i
].prog
)
101 *ipsp
++ = (s64
)(uintptr_t)d
->progs
[i
].prog
->bpf_func
;
103 return arch_prepare_bpf_dispatcher(image
, buf
, &ips
[0], d
->num_progs
);
106 static void bpf_dispatcher_update(struct bpf_dispatcher
*d
, int prev_num_progs
)
112 noff
= d
->image_off
^ (PAGE_SIZE
/ 2);
114 new = d
->num_progs
? d
->image
+ noff
: NULL
;
115 tmp
= d
->num_progs
? d
->rw_image
+ noff
: NULL
;
117 /* Prepare the dispatcher in d->rw_image. Then use
118 * bpf_arch_text_copy to update d->image, which is RO+X.
120 if (bpf_dispatcher_prepare(d
, new, tmp
))
122 if (IS_ERR(bpf_arch_text_copy(new, tmp
, PAGE_SIZE
/ 2)))
126 __BPF_DISPATCHER_UPDATE(d
, new ?: (void *)&bpf_dispatcher_nop_func
);
128 /* Make sure all the callers executing the previous/old half of the
129 * image leave it, so following update call can modify it safely.
137 void bpf_dispatcher_change_prog(struct bpf_dispatcher
*d
, struct bpf_prog
*from
,
140 bool changed
= false;
146 mutex_lock(&d
->mutex
);
148 d
->image
= bpf_prog_pack_alloc(PAGE_SIZE
, bpf_jit_fill_hole_with_zero
);
151 d
->rw_image
= bpf_jit_alloc_exec(PAGE_SIZE
);
153 bpf_prog_pack_free(d
->image
, PAGE_SIZE
);
157 bpf_image_ksym_init(d
->image
, PAGE_SIZE
, &d
->ksym
);
158 bpf_image_ksym_add(&d
->ksym
);
161 prev_num_progs
= d
->num_progs
;
162 changed
|= bpf_dispatcher_remove_prog(d
, from
);
163 changed
|= bpf_dispatcher_add_prog(d
, to
);
168 bpf_dispatcher_update(d
, prev_num_progs
);
170 mutex_unlock(&d
->mutex
);