// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)
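
/*
 * A stack map is a hash table of stack traces. Each bucket holds one
 * de-duplicated trace: either raw instruction pointers (one u64 per
 * frame) or, with BPF_F_STACK_BUILD_ID, <build_id, offset> records
 * that remain meaningful across ASLR and process restarts.
 */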
struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};

static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	mmap_read_unlock_non_owner(work->mm);
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
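
/*
 * On PREEMPT_RT the irq_work path is never used (the build_id lookup
 * below forces the fallback instead of taking mmap_lock), so reaching
 * do_up_read() there indicates a bug, hence the WARN_ON_ONCE().
 */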

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}
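
/*
 * Per-frame element size: a plain stack map stores one u64 ip per
 * frame; a build_id map stores one struct bpf_stack_build_id per frame
 * (status, build_id bytes, and an ip-or-offset union).
 */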

static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}
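
/*
 * All buckets are preallocated and parked on a per-cpu freelist, so
 * bpf_get_stackid() can grab one from NMI/irq context without ever
 * calling an allocator.
 */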

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	struct bpf_map_memory mem;
	u64 cost, n_buckets;
	int err;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	err = bpf_map_charge_init(&mem, cost);
	if (err)
		return ERR_PTR(err);

	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&smap->map, attr);
	smap->map.value_size = value_size;
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_charge;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	bpf_map_charge_move(&smap->map.memory, &mem);

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_charge:
	bpf_map_charge_finish(&mem);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}
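
/*
 * Memory is charged against the memlock budget up front
 * (bpf_map_charge_init) and only transferred to the map once every
 * allocation has succeeded, so each error path simply undoes the
 * charge and frees what was allocated so far.
 */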

#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
					   unsigned char *build_id,
					   void *note_start,
					   Elf32_Word note_size)
{
	Elf32_Word note_offs = 0, new_offs;

	/* check for overflow */
	if (note_start < page_addr || note_start + note_size < note_start)
		return -EINVAL;

	/* only supports note that fits in the first page */
	if (note_start + note_size > page_addr + PAGE_SIZE)
		return -EINVAL;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == BPF_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id,
			       note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
			       nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0,
			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return 0;
		}
		new_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
		if (new_offs <= note_offs)	/* overflow */
			break;
		note_offs = new_offs;
	}
	return -EINVAL;
}
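
/*
 * Each ELF note is laid out as an Elf32_Nhdr, then the 4-byte-aligned
 * name ("GNU" for a build id note, n_type == NT_GNU_BUILD_ID == 3),
 * then the 4-byte-aligned descriptor, which carries the build id bytes
 * themselves.
 */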

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
				     unsigned char *build_id)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
	Elf32_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
		return -EINVAL;

	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
				     unsigned char *build_id)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
	Elf64_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
		return -EINVAL;

	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
				  unsigned char *build_id)
{
	Elf32_Ehdr *ehdr;
	struct page *page;
	void *page_addr;
	int ret;

	/* only works for page backed storage  */
	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable file and shared object file */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = stack_map_get_build_id_32(page_addr, build_id);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}
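
/*
 * Only the first page of the mapped file is examined, so the ELF
 * header, program headers, and note segment must all live in that
 * page; anything else fails the lookup and the caller falls back to
 * raw instruction pointers.
 */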

static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&up_read_work);
			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
				/* cannot queue more up_read, fallback */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow to trylock mmap sem in
			 * interrupt disabled context. Force the fallback code.
			 */
			irq_work_busy = true;
		}
	}

	/*
	 * We cannot do up_read() when the irq is disabled, because of
	 * risk to deadlock with rq_lock. To do build_id lookup when the
	 * irqs are disabled, we need to run up_read() in irq_work. We use
	 * a percpu variable to do the irq_work. If the irq_work is
	 * already used by another lookup, we fall back to report ips.
	 *
	 * Same fallback is used for kernel stack (!user) on a stackmap
	 * with build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    !mmap_read_trylock_non_owner(current->mm)) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		vma = find_vma(current->mm, ips[i]);
		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
			continue;
		}
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		mmap_read_unlock_non_owner(current->mm);
	} else {
		work->mm = current->mm;
		irq_work_queue(&work->irq_work);
	}
}
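
/*
 * The resulting offset is file-relative (the vma's file offset plus
 * the ip's offset into the vma), so a <build_id, offset> pair can be
 * symbolized against the on-disk ELF regardless of where ASLR mapped
 * it.
 */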

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / stack_map_data_size(map);
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;
	bool hash_matches;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}
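
/*
 * Typical use from a BPF program (a sketch; the map name is
 * illustrative, not part of this file):
 *
 *	// "stacks" is a BPF_MAP_TYPE_STACK_TRACE map with key_size 4
 *	int key = bpf_get_stackid(ctx, &stacks, BPF_F_USER_STACK);
 *	if (key >= 0)
 *		// aggregate samples keyed by stack id
 *
 * On a hash collision with a different trace, the helper returns
 * -EEXIST unless BPF_F_REUSE_STACKID allows the old bucket to be
 * overwritten.
 */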

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	num_elem = size / elem_size;
	if (sysctl_perf_event_max_stack < num_elem)
		init_nr = 0;
	else
		init_nr = sysctl_perf_event_max_stack - num_elem;
	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);
	if (unlikely(!trace))
		goto err_fault;

	trace_nr = trace->nr - init_nr;
	if (trace_nr < skip)
		goto err_fault;

	trace_nr -= skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;
	ips = trace->ip + skip + init_nr;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}
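
/*
 * Unlike bpf_get_stackid(), this helper copies the (possibly
 * build_id-annotated) trace straight into the caller's buffer instead
 * of de-duplicating it into the map, returns the number of bytes
 * copied, and zeroes the unused tail of the buffer.
 */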

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}
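
/*
 * The bucket is detached (xchg with NULL) for the duration of the
 * copy, so a concurrent update cannot recycle it mid-read; it is then
 * swapped back, and any bucket that landed in the slot meanwhile is
 * returned to the freelist.
 */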

static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}
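
/*
 * Iteration restarts from bucket 0 whenever the supplied key is
 * missing or invalid, matching the convention of other bpf map
 * get_next_key implementations.
 */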

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	/* wait for bpf programs to complete before freeing stack map */
	synchronize_rcu();

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

const struct bpf_map_ops stack_trace_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);