1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/fs.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/btf_ids.h>
9 struct bpf_iter_seq_map_info
{
13 static void *bpf_map_seq_start(struct seq_file
*seq
, loff_t
*pos
)
15 struct bpf_iter_seq_map_info
*info
= seq
->private;
18 map
= bpf_map_get_curr_or_next(&info
->map_id
);
27 static void *bpf_map_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
29 struct bpf_iter_seq_map_info
*info
= seq
->private;
33 bpf_map_put((struct bpf_map
*)v
);
34 return bpf_map_get_curr_or_next(&info
->map_id
);
37 struct bpf_iter__bpf_map
{
38 __bpf_md_ptr(struct bpf_iter_meta
*, meta
);
39 __bpf_md_ptr(struct bpf_map
*, map
);
42 DEFINE_BPF_ITER_FUNC(bpf_map
, struct bpf_iter_meta
*meta
, struct bpf_map
*map
)
44 static int __bpf_map_seq_show(struct seq_file
*seq
, void *v
, bool in_stop
)
46 struct bpf_iter__bpf_map ctx
;
47 struct bpf_iter_meta meta
;
48 struct bpf_prog
*prog
;
54 prog
= bpf_iter_get_info(&meta
, in_stop
);
56 ret
= bpf_iter_run_prog(prog
, &ctx
);
61 static int bpf_map_seq_show(struct seq_file
*seq
, void *v
)
63 return __bpf_map_seq_show(seq
, v
, false);
66 static void bpf_map_seq_stop(struct seq_file
*seq
, void *v
)
69 (void)__bpf_map_seq_show(seq
, v
, true);
71 bpf_map_put((struct bpf_map
*)v
);
74 static const struct seq_operations bpf_map_seq_ops
= {
75 .start
= bpf_map_seq_start
,
76 .next
= bpf_map_seq_next
,
77 .stop
= bpf_map_seq_stop
,
78 .show
= bpf_map_seq_show
,
81 BTF_ID_LIST_GLOBAL_SINGLE(btf_bpf_map_id
, struct, bpf_map
)
83 static const struct bpf_iter_seq_info bpf_map_seq_info
= {
84 .seq_ops
= &bpf_map_seq_ops
,
85 .init_seq_private
= NULL
,
86 .fini_seq_private
= NULL
,
87 .seq_priv_size
= sizeof(struct bpf_iter_seq_map_info
),
90 static struct bpf_iter_reg bpf_map_reg_info
= {
92 .ctx_arg_info_size
= 1,
94 { offsetof(struct bpf_iter__bpf_map
, map
),
95 PTR_TO_BTF_ID_OR_NULL
| PTR_TRUSTED
},
97 .seq_info
= &bpf_map_seq_info
,
100 static int bpf_iter_attach_map(struct bpf_prog
*prog
,
101 union bpf_iter_link_info
*linfo
,
102 struct bpf_iter_aux_info
*aux
)
104 u32 key_acc_size
, value_acc_size
, key_size
, value_size
;
106 bool is_percpu
= false;
109 if (!linfo
->map
.map_fd
)
112 map
= bpf_map_get_with_uref(linfo
->map
.map_fd
);
116 if (map
->map_type
== BPF_MAP_TYPE_PERCPU_HASH
||
117 map
->map_type
== BPF_MAP_TYPE_LRU_PERCPU_HASH
||
118 map
->map_type
== BPF_MAP_TYPE_PERCPU_ARRAY
)
120 else if (map
->map_type
!= BPF_MAP_TYPE_HASH
&&
121 map
->map_type
!= BPF_MAP_TYPE_LRU_HASH
&&
122 map
->map_type
!= BPF_MAP_TYPE_ARRAY
)
125 key_acc_size
= prog
->aux
->max_rdonly_access
;
126 value_acc_size
= prog
->aux
->max_rdwr_access
;
127 key_size
= map
->key_size
;
129 value_size
= map
->value_size
;
131 value_size
= round_up(map
->value_size
, 8) * num_possible_cpus();
133 if (key_acc_size
> key_size
|| value_acc_size
> value_size
) {
142 bpf_map_put_with_uref(map
);
146 static void bpf_iter_detach_map(struct bpf_iter_aux_info
*aux
)
148 bpf_map_put_with_uref(aux
->map
);
151 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info
*aux
,
152 struct seq_file
*seq
)
154 seq_printf(seq
, "map_id:\t%u\n", aux
->map
->id
);
157 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info
*aux
,
158 struct bpf_link_info
*info
)
160 info
->iter
.map
.map_id
= aux
->map
->id
;
164 DEFINE_BPF_ITER_FUNC(bpf_map_elem
, struct bpf_iter_meta
*meta
,
165 struct bpf_map
*map
, void *key
, void *value
)
167 static const struct bpf_iter_reg bpf_map_elem_reg_info
= {
168 .target
= "bpf_map_elem",
169 .attach_target
= bpf_iter_attach_map
,
170 .detach_target
= bpf_iter_detach_map
,
171 .show_fdinfo
= bpf_iter_map_show_fdinfo
,
172 .fill_link_info
= bpf_iter_map_fill_link_info
,
173 .ctx_arg_info_size
= 2,
175 { offsetof(struct bpf_iter__bpf_map_elem
, key
),
176 PTR_TO_BUF
| PTR_MAYBE_NULL
| MEM_RDONLY
},
177 { offsetof(struct bpf_iter__bpf_map_elem
, value
),
178 PTR_TO_BUF
| PTR_MAYBE_NULL
},
182 static int __init
bpf_map_iter_init(void)
186 bpf_map_reg_info
.ctx_arg_info
[0].btf_id
= *btf_bpf_map_id
;
187 ret
= bpf_iter_reg_target(&bpf_map_reg_info
);
191 return bpf_iter_reg_target(&bpf_map_elem_reg_info
);
194 late_initcall(bpf_map_iter_init
);
196 __bpf_kfunc_start_defs();
198 __bpf_kfunc s64
bpf_map_sum_elem_count(const struct bpf_map
*map
)
204 if (!map
|| !map
->elem_count
)
207 for_each_possible_cpu(cpu
) {
208 pcount
= per_cpu_ptr(map
->elem_count
, cpu
);
209 ret
+= READ_ONCE(*pcount
);
214 __bpf_kfunc_end_defs();
216 BTF_KFUNCS_START(bpf_map_iter_kfunc_ids
)
217 BTF_ID_FLAGS(func
, bpf_map_sum_elem_count
, KF_TRUSTED_ARGS
)
218 BTF_KFUNCS_END(bpf_map_iter_kfunc_ids
)
220 static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set
= {
221 .owner
= THIS_MODULE
,
222 .set
= &bpf_map_iter_kfunc_ids
,
225 static int init_subsystem(void)
227 return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC
, &bpf_map_iter_kfunc_set
);
229 late_initcall(init_subsystem
);