/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
12 #include <linux/bpf.h>
13 #include <linux/rcupdate.h>
14 #include <linux/random.h>
15 #include <linux/smp.h>
16 #include <linux/topology.h>
17 #include <linux/ktime.h>
18 #include <linux/sched.h>
19 #include <linux/uidgid.h>
20 #include <linux/filter.h>
22 /* If kernel subsystem is allowing eBPF programs to call this function,
23 * inside its own verifier_ops->get_func_proto() callback it should return
24 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
26 * Different map implementations will rely on rcu in map methods
27 * lookup/update/delete, therefore eBPF programs must run under rcu lock
28 * if program is allowed to access maps, so check rcu_read_lock_held in
29 * all three functions.
31 BPF_CALL_2(bpf_map_lookup_elem
, struct bpf_map
*, map
, void *, key
)
33 WARN_ON_ONCE(!rcu_read_lock_held());
34 return (unsigned long) map
->ops
->map_lookup_elem(map
, key
);
37 const struct bpf_func_proto bpf_map_lookup_elem_proto
= {
38 .func
= bpf_map_lookup_elem
,
41 .ret_type
= RET_PTR_TO_MAP_VALUE_OR_NULL
,
42 .arg1_type
= ARG_CONST_MAP_PTR
,
43 .arg2_type
= ARG_PTR_TO_MAP_KEY
,
46 BPF_CALL_4(bpf_map_update_elem
, struct bpf_map
*, map
, void *, key
,
47 void *, value
, u64
, flags
)
49 WARN_ON_ONCE(!rcu_read_lock_held());
50 return map
->ops
->map_update_elem(map
, key
, value
, flags
);
53 const struct bpf_func_proto bpf_map_update_elem_proto
= {
54 .func
= bpf_map_update_elem
,
57 .ret_type
= RET_INTEGER
,
58 .arg1_type
= ARG_CONST_MAP_PTR
,
59 .arg2_type
= ARG_PTR_TO_MAP_KEY
,
60 .arg3_type
= ARG_PTR_TO_MAP_VALUE
,
61 .arg4_type
= ARG_ANYTHING
,
64 BPF_CALL_2(bpf_map_delete_elem
, struct bpf_map
*, map
, void *, key
)
66 WARN_ON_ONCE(!rcu_read_lock_held());
67 return map
->ops
->map_delete_elem(map
, key
);
70 const struct bpf_func_proto bpf_map_delete_elem_proto
= {
71 .func
= bpf_map_delete_elem
,
74 .ret_type
= RET_INTEGER
,
75 .arg1_type
= ARG_CONST_MAP_PTR
,
76 .arg2_type
= ARG_PTR_TO_MAP_KEY
,
79 BPF_CALL_3(bpf_map_push_elem
, struct bpf_map
*, map
, void *, value
, u64
, flags
)
81 return map
->ops
->map_push_elem(map
, value
, flags
);
84 const struct bpf_func_proto bpf_map_push_elem_proto
= {
85 .func
= bpf_map_push_elem
,
88 .ret_type
= RET_INTEGER
,
89 .arg1_type
= ARG_CONST_MAP_PTR
,
90 .arg2_type
= ARG_PTR_TO_MAP_VALUE
,
91 .arg3_type
= ARG_ANYTHING
,
94 BPF_CALL_2(bpf_map_pop_elem
, struct bpf_map
*, map
, void *, value
)
96 return map
->ops
->map_pop_elem(map
, value
);
99 const struct bpf_func_proto bpf_map_pop_elem_proto
= {
100 .func
= bpf_map_pop_elem
,
103 .ret_type
= RET_INTEGER
,
104 .arg1_type
= ARG_CONST_MAP_PTR
,
105 .arg2_type
= ARG_PTR_TO_UNINIT_MAP_VALUE
,
108 BPF_CALL_2(bpf_map_peek_elem
, struct bpf_map
*, map
, void *, value
)
110 return map
->ops
->map_peek_elem(map
, value
);
113 const struct bpf_func_proto bpf_map_peek_elem_proto
= {
114 .func
= bpf_map_pop_elem
,
117 .ret_type
= RET_INTEGER
,
118 .arg1_type
= ARG_CONST_MAP_PTR
,
119 .arg2_type
= ARG_PTR_TO_UNINIT_MAP_VALUE
,
122 const struct bpf_func_proto bpf_get_prandom_u32_proto
= {
123 .func
= bpf_user_rnd_u32
,
125 .ret_type
= RET_INTEGER
,
/* Return the id of the CPU the program is currently executing on. */
BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}
133 const struct bpf_func_proto bpf_get_smp_processor_id_proto
= {
134 .func
= bpf_get_smp_processor_id
,
136 .ret_type
= RET_INTEGER
,
/* Return the NUMA node id of the current CPU. */
BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}
144 const struct bpf_func_proto bpf_get_numa_node_id_proto
= {
145 .func
= bpf_get_numa_node_id
,
147 .ret_type
= RET_INTEGER
,
/* Return nanoseconds of CLOCK_MONOTONIC via the fast accessor, which is
 * safe to call from NMI context.
 */
BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}
156 const struct bpf_func_proto bpf_ktime_get_ns_proto
= {
157 .func
= bpf_ktime_get_ns
,
159 .ret_type
= RET_INTEGER
,
162 BPF_CALL_0(bpf_get_current_pid_tgid
)
164 struct task_struct
*task
= current
;
169 return (u64
) task
->tgid
<< 32 | task
->pid
;
172 const struct bpf_func_proto bpf_get_current_pid_tgid_proto
= {
173 .func
= bpf_get_current_pid_tgid
,
175 .ret_type
= RET_INTEGER
,
178 BPF_CALL_0(bpf_get_current_uid_gid
)
180 struct task_struct
*task
= current
;
187 current_uid_gid(&uid
, &gid
);
188 return (u64
) from_kgid(&init_user_ns
, gid
) << 32 |
189 from_kuid(&init_user_ns
, uid
);
192 const struct bpf_func_proto bpf_get_current_uid_gid_proto
= {
193 .func
= bpf_get_current_uid_gid
,
195 .ret_type
= RET_INTEGER
,
198 BPF_CALL_2(bpf_get_current_comm
, char *, buf
, u32
, size
)
200 struct task_struct
*task
= current
;
205 strncpy(buf
, task
->comm
, size
);
207 /* Verifier guarantees that size > 0. For task->comm exceeding
208 * size, guarantee that buf is %NUL-terminated. Unconditionally
209 * done here to save the size test.
214 memset(buf
, 0, size
);
218 const struct bpf_func_proto bpf_get_current_comm_proto
= {
219 .func
= bpf_get_current_comm
,
221 .ret_type
= RET_INTEGER
,
222 .arg1_type
= ARG_PTR_TO_UNINIT_MEM
,
223 .arg2_type
= ARG_CONST_SIZE
,
226 #ifdef CONFIG_CGROUPS
227 BPF_CALL_0(bpf_get_current_cgroup_id
)
229 struct cgroup
*cgrp
= task_dfl_cgroup(current
);
231 return cgrp
->kn
->id
.id
;
234 const struct bpf_func_proto bpf_get_current_cgroup_id_proto
= {
235 .func
= bpf_get_current_cgroup_id
,
237 .ret_type
= RET_INTEGER
,
240 #ifdef CONFIG_CGROUP_BPF
241 DECLARE_PER_CPU(struct bpf_cgroup_storage
*,
242 bpf_cgroup_storage
[MAX_BPF_CGROUP_STORAGE_TYPE
]);
244 BPF_CALL_2(bpf_get_local_storage
, struct bpf_map
*, map
, u64
, flags
)
246 /* flags argument is not used now,
247 * but provides an ability to extend the API.
248 * verifier checks that its value is correct.
250 enum bpf_cgroup_storage_type stype
= cgroup_storage_type(map
);
251 struct bpf_cgroup_storage
*storage
;
254 storage
= this_cpu_read(bpf_cgroup_storage
[stype
]);
256 if (stype
== BPF_CGROUP_STORAGE_SHARED
)
257 ptr
= &READ_ONCE(storage
->buf
)->data
[0];
259 ptr
= this_cpu_ptr(storage
->percpu_buf
);
261 return (unsigned long)ptr
;
264 const struct bpf_func_proto bpf_get_local_storage_proto
= {
265 .func
= bpf_get_local_storage
,
267 .ret_type
= RET_PTR_TO_MAP_VALUE
,
268 .arg1_type
= ARG_CONST_MAP_PTR
,
269 .arg2_type
= ARG_ANYTHING
,