kernel/bpf/kmem_cache_iter.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

/* open-coded version */
struct bpf_iter_kmem_cache {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

struct bpf_iter_kmem_cache_kern {
	struct kmem_cache *pos;
} __attribute__((aligned(8)));
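/*
 * Note: the kernel-side view above must stay within the size and alignment of
 * the opaque struct exposed to BPF programs; bpf_iter_kmem_cache_new() below
 * enforces this with BUILD_BUG_ON() checks.
 */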
#define KMEM_CACHE_POS_START	((void *)1L)
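/*
 * KMEM_CACHE_POS_START is a sentinel value (never a valid kmem_cache pointer)
 * marking a freshly initialized iterator; the first bpf_iter_kmem_cache_next()
 * call maps it to the first entry of slab_caches.
 */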
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));

	kit->pos = KMEM_CACHE_POS_START;
	return 0;
}

__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *prev = kit->pos;
	struct kmem_cache *next;
	bool destroy = false;

	if (!prev)
		return NULL;

	mutex_lock(&slab_mutex);

	if (list_empty(&slab_caches)) {
		mutex_unlock(&slab_mutex);
		return NULL;
	}

	if (prev == KMEM_CACHE_POS_START)
		next = list_first_entry(&slab_caches, struct kmem_cache, list);
	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
		next = NULL;
	else
		next = list_next_entry(prev, list);

	/* boot_caches have negative refcount, don't touch them */
	if (next && next->refcount > 0)
		next->refcount++;

	/* Skip kmem_cache_destroy() for active entries */
	if (prev && prev != KMEM_CACHE_POS_START) {
		if (prev->refcount > 1)
			prev->refcount--;
		else if (prev->refcount == 1)
			destroy = true;
	}

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(prev);

	kit->pos = next;
	return next;
}

__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *s = kit->pos;
	bool destroy = false;

	if (s == NULL || s == KMEM_CACHE_POS_START)
		return;

	mutex_lock(&slab_mutex);

	/* Skip kmem_cache_destroy() for active entries */
	if (s->refcount > 1)
		s->refcount--;
	else if (s->refcount == 1)
		destroy = true;

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(s);
}

__bpf_kfunc_end_defs();
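/*
 * Illustrative sketch (not part of this file): a BPF program could walk all
 * slab caches with the open-coded iterator kfuncs above.  This assumes the
 * bpf_for_each() convenience macro and kfunc declarations from the BPF
 * selftests' bpf_experimental.h plus type definitions from vmlinux.h; the
 * section name and printed fields are only examples.
 *
 *	SEC("syscall")
 *	int dump_kmem_caches(void *ctx)
 *	{
 *		struct kmem_cache *s;
 *
 *		bpf_for_each(kmem_cache, s)
 *			bpf_printk("%s: object_size=%u", s->name, s->object_size);
 *
 *		return 0;
 *	}
 */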
struct bpf_iter__kmem_cache {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kmem_cache *, s);
};

union kmem_cache_iter_priv {
	struct bpf_iter_kmem_cache it;
	struct bpf_iter_kmem_cache_kern kit;
};
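/*
 * The union above lets the seq_file callbacks share state with the open-coded
 * iterator: kmem_cache_iter_seq_start() writes kit.pos directly, while
 * seq_next() and seq_stop() reuse bpf_iter_kmem_cache_next() and
 * bpf_iter_kmem_cache_destroy() on the overlaid 'it' member.
 */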
static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t cnt = 0;
	bool found = false;
	struct kmem_cache *s;
	union kmem_cache_iter_priv *p = seq->private;

	mutex_lock(&slab_mutex);

	/* Find an entry at the given position in the slab_caches list instead
	 * of keeping a reference (of the last visited entry, if any) out of
	 * slab_mutex. It might miss something if one is deleted in the middle
	 * while it releases the lock. But it should be rare and there's not
	 * much we can do about it.
	 */
	list_for_each_entry(s, &slab_caches, list) {
		if (cnt == *pos) {
			/* Make sure this entry remains in the list by getting
			 * a new reference count. Note that boot_cache entries
			 * have a negative refcount, so don't touch them.
			 */
			if (s->refcount > 0)
				s->refcount++;
			found = true;
			break;
		}
		cnt++;
	}
	mutex_unlock(&slab_mutex);

	if (!found)
		s = NULL;

	p->kit.pos = s;
	return s;
}

static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	union kmem_cache_iter_priv *p = seq->private;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, true);
	if (prog && !ctx.s)
		bpf_iter_run_prog(prog, &ctx);

	bpf_iter_kmem_cache_destroy(&p->it);
}

static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union kmem_cache_iter_priv *p = seq->private;

	++*pos;

	return bpf_iter_kmem_cache_next(&p->it);
}

static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}

static const struct seq_operations kmem_cache_iter_seq_ops = {
	.start	= kmem_cache_iter_seq_start,
	.next	= kmem_cache_iter_seq_next,
	.stop	= kmem_cache_iter_seq_stop,
	.show	= kmem_cache_iter_seq_show,
};

BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
	.seq_ops		= &kmem_cache_iter_seq_ops,
	.seq_priv_size		= sizeof(union kmem_cache_iter_priv),
};

static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
					    struct seq_file *seq)
{
	seq_puts(seq, "kmem_cache iter\n");
}

DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
		     struct kmem_cache *s)

static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
	.target			= "kmem_cache",
	.feature		= BPF_ITER_RESCHED,
	.show_fdinfo		= bpf_iter_kmem_cache_show_fdinfo,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__kmem_cache, s),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &kmem_cache_iter_seq_info,
};

static int __init bpf_kmem_cache_iter_init(void)
{
	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
}

late_initcall(bpf_kmem_cache_iter_init);
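/*
 * Userspace consumption sketch (illustrative, not part of this file): the
 * "kmem_cache" iterator registered above is typically driven by attaching a
 * SEC("iter/kmem_cache") program and reading the resulting iterator fd, e.g.
 * with standard libbpf calls (error handling omitted):
 *
 *	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
 *	int iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *
 *	close(iter_fd);
 *	bpf_link__destroy(link);
 */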