// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can properly
 * check the arguments.
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods, therefore eBPF programs must run under the RCU read lock whenever
 * they are allowed to access maps; hence the rcu_read_lock_held() checks in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

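/* Illustrative sketch, not part of the original file: the comment above
 * describes how a subsystem hands these protos to the verifier. A
 * hypothetical verifier_ops->get_func_proto() callback (the function name
 * below is made up for the example) would look roughly like this:
 *
 *	static const struct bpf_func_proto *
 *	example_get_func_proto(enum bpf_func_id func_id,
 *			       const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */
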
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

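/* Illustrative sketch, not part of the original file: on the BPF program
 * side the three helpers above operate on BPF_MAP_TYPE_QUEUE or
 * BPF_MAP_TYPE_STACK maps. Program-side usage (the map name is made up)
 * looks roughly like this:
 *
 *	__u64 val = 42;
 *
 *	bpf_map_push_elem(&example_queue, &val, BPF_ANY);
 *	bpf_map_peek_elem(&example_queue, &val);   (reads the head, keeps it)
 *	bpf_map_pop_elem(&example_queue, &val);    (reads the head, removes it)
 */
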
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.ret_type	= RET_INTEGER,
};

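/* Illustrative note, not part of the original file: the helper packs tgid in
 * the upper 32 bits and pid in the lower 32 bits, so a BPF program typically
 * splits the return value like this:
 *
 *	__u64 id   = bpf_get_current_pid_tgid();
 *	__u32 tgid = id >> 32;		(userspace "process" ID)
 *	__u32 pid  = (__u32)id;		(userspace "thread" ID)
 */
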
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

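/* Illustrative sketch, not part of the original file: program-side usage of
 * the helper above. The buffer is a fixed-size stack array so the verifier
 * can check the ARG_CONST_SIZE argument (TASK_COMM_LEN is 16):
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 */
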
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

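/* Illustrative sketch, not part of the original file: a BPF program uses the
 * two helpers above on a struct bpf_spin_lock embedded in a map value. The
 * struct and map names are made up for the example:
 *
 *	struct example_val {
 *		struct bpf_spin_lock lock;
 *		__u64 counter;
 *	};
 *
 *	struct example_val *val = bpf_map_lookup_elem(&example_map, &key);
 *
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->counter++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 */
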
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgrp->kn->id.id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now, but provides the ability
	 * to extend the API. The verifier checks that its value is
	 * correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	void *ptr;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

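/* Illustrative sketch, not part of the original file: a cgroup BPF program
 * obtains a pointer into its cgroup storage map value. The map name is made
 * up and the flags argument is currently always 0:
 *
 *	__u64 *counter = bpf_get_local_storage(&example_cgroup_storage, 0);
 *
 *	__sync_fetch_and_add(counter, 1);
 */
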
#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

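/* Illustrative sketch, not part of the original file: program-side usage of
 * bpf_strtol(), e.g. in a cgroup sysctl program that has already copied the
 * new value into buf/buf_len. Base 10 is passed in the low bits of flags:
 *
 *	long val;
 *
 *	if (bpf_strtol(buf, buf_len, 10, &val) < 0)
 *		return 0;	(reject: not a valid number)
 */
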
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};