// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};
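
/* A sketch of the lifecycle these states implement (derived from the
 * update/delete paths below, not an exhaustive spec):
 *
 *	INIT     - map created, no value written yet
 *	INUSE    - update_elem() succeeded and st_ops->reg() registered
 *		   kvalue.data with the kernel subsystem
 *	TOBEFREE - delete_elem() flipped the state and called
 *		   st_ops->unreg(); the map goes away with the last ref
 */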

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[0] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* progs holds all the bpf_progs that are populated into the
	 * func ptrs of the kernel's struct (in kvalue.data).
	 */
	struct bpf_prog **progs;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines for
	 * "progs[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) in a form that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};
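
/* Layout note: kvalue must stay the last member.  map_alloc() below
 * over-allocates this struct by (vt->size - sizeof(struct
 * bpf_struct_ops_value)) bytes so that kvalue.data[] can hold the
 * subsystem's struct in-line.
 */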

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace, and its btf-type-id is
 * stored in map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
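
/* As an illustration, assuming tcp_congestion_ops is listed in
 * bpf_struct_ops_types.h, the macro above expands to roughly:
 *
 *	extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		refcount_t refcnt;
 *		enum bpf_struct_ops_state state;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 */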

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};
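
/* The same bpf_struct_ops_types.h drives the enum above and the array
 * below: each BPF_STRUCT_OPS_TYPE(_name) line in that header yields one
 * BPF_STRUCT_OPS_TYPE_##_name index and one matching &bpf_##_name entry.
 */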

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}
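
/* Note: bpf_struct_ops_init() is expected to run once, while the vmlinux
 * BTF is being parsed, which is why a bad entry is pr_warn()'ed and
 * skipped with "continue" instead of being reported as a hard failure.
 */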

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}
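
/* Of the two lookups above: bpf_struct_ops_find_value() resolves the
 * userspace-visible value type (bpf_struct_ops_<name>) and is used by
 * map_alloc() below, while bpf_struct_ops_find() resolves the kernel
 * struct's own type id and is exported for callers outside this file
 * (e.g. the verifier checking a prog's attach_btf_id).
 */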

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}
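
/* A struct_ops map holds exactly one element, at key 0.  get_next_key()
 * above therefore only ever reports key 0, and the lookup/update paths
 * below reject any other key.
 */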

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = (struct bpf_struct_ops_value *)value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}
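
/* Unlike the syscall-side lookup above, a generic lookup of a struct_ops
 * map element (e.g. from a BPF program) is not supported, hence the
 * unconditional error below.
 */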

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->progs[i]) {
			bpf_prog_put(st_map->progs[i]);
			st_map->progs[i] = NULL;
		}
	}
}

static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
					 NULL, NULL);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}
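
/* update_elem() below is where a struct_ops map comes alive.  A rough
 * sketch of the flow it implements:
 *
 *	1. Validate the key/flags and, via check_zero_holes() above, that
 *	   every padding hole in the value is zero.
 *	2. Copy the user value into uvalue, then walk every member of the
 *	   kernel struct:
 *	   - a "struct module *" member is filled with BPF_MODULE_OWNER;
 *	   - members claimed by st_ops->init_member() are left to it;
 *	   - any other non-func-ptr member must be zero;
 *	   - each func ptr gets a trampoline written into st_map->image
 *	     which forwards the args to the chosen BPF prog.
 *	3. Make the image read-only and executable, then hand kvalue.data
 *	   to the subsystem through st_ops->reg().
 */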

static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = (struct bpf_struct_ops_value *)value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here.  Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
						 NULL, NULL);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check to the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		st_map->progs[i] = prog;

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			err = -EINVAL;
			goto reset_unlock;
		}

		err = arch_prepare_bpf_trampoline(image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i], 0,
						  &prog, 1, NULL, 0, NULL);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* store the prog id in udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  It is very unlikely since
	 * the above init_member() should have caught it earlier
	 * before reg().  The only possibility is if there was a race
	 * in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);

unlock:
	mutex_unlock(&st_map->lock);
	return err;
}

static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) {
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
	}

	return 0;
}
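
/* seq_show reuses the syscall-side lookup so that dumping the map (e.g.
 * through a pinned map file) shows the same uvalue view (prog ids, state,
 * refcnt) that a userspace lookup sees.
 */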

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->progs)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->progs);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t map_total_size, st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));
	map_total_size = st_map_size +
		/* uvalue */
		vt->size +
		/* struct bpf_prog **progs */
		btf_type_vlen(t) * sizeof(struct bpf_prog *);
	err = bpf_map_charge_init(&mem, map_total_size);
	if (err < 0)
		return ERR_PTR(err);

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->progs =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
		bpf_struct_ops_map_free(map);
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);
	bpf_map_charge_move(&map->memory, &mem);

	return map;
}

const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
};
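
/* A minimal sketch of the userspace side, to show how the ops above fit
 * together (illustrative only: error handling is omitted, "vt_size",
 * "value_id", "value" and "zero" are placeholders, and building the
 * value is what libbpf automates):
 *
 *	union bpf_attr attr = {
 *		.map_type = BPF_MAP_TYPE_STRUCT_OPS,
 *		.key_size = 4,		// single u32 key, always 0
 *		.value_size = vt_size,	// size of bpf_struct_ops_<name>
 *		.max_entries = 1,
 *		.btf_vmlinux_value_type_id = value_id,
 *	};
 *	int map_fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 *	// Fill a struct bpf_struct_ops_<name> value: func-ptr fields
 *	// carry BPF prog fds, everything else stays zero unless
 *	// init_member() accepts it.  One update registers the ops;
 *	// a delete unregisters them:
 *	bpf_map_update_elem(map_fd, &zero, value, 0);
 *	bpf_map_delete_elem(map_fd, &zero);
 */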

/* "const void *" because some subsystems pass a const
 * (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

	return refcount_inc_not_zero(&kvalue->refcnt);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		bpf_map_put(&st_map->map);
	}
}