// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
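/*
 * Illustrative arithmetic (not part of the kernel sources): assuming an
 * architecture with 4KiB pages and 128-byte XOL slots, the XOL page holds
 * UINSNS_PER_PAGE = 4096 / 128 = 32 slots; slot 0 is reserved for the
 * uretprobe trampoline (see __create_xol_area()), leaving 31 slots for
 * out-of-line single-stepping.
 */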
static struct rb_root uprobes_tree = RB_ROOT;

/*
 * Allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time. Probably a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Covers return_instance's uprobe lifetime. */
DEFINE_STATIC_SRCU(uretprobes_srcu);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0
struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	union {
		struct rcu_head		rcu;
		struct work_struct	work;
	};
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;		/* "unsigned long" so bitops work */

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot. It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct page			*page;

	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself. The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
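/*
 * Worked example for the two helpers above (hypothetical numbers, not
 * taken from any real binary): a vma with vm_start = 0x400000 that maps
 * file page offset vm_pgoff = 2 (i.e. file offset 0x2000 with 4KiB
 * pages). A probe at file offset 0x2010 then lives at
 *
 *	vaddr = 0x400000 + 0x2010 - (2 << 12) = 0x400010
 *
 * and vaddr_to_offset() is the exact inverse:
 * 0x2000 + (0x400010 - 0x400000) = 0x2010.
 */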
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}
/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; the uprobe always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}
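/*
 * Summary of verify_opcode() results (illustrative, derived from the
 * checks above):
 *
 *	writing SWBP, SWBP already there  -> 0 (register: nothing to do)
 *	writing SWBP, original insn there -> 1 (go ahead and install)
 *	restoring insn, SWBP still there  -> 1 (go ahead and restore)
 *	restoring insn, SWBP already gone -> 0 (unregister: nothing to do)
 */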
static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}
/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture, then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for read or write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}
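/*
 * A short recap of the flow above (commentary only): pin the page backing
 * @vaddr, bail out early via verify_opcode() if the expected opcode is
 * already in place, adjust the SDT reference counter if one exists, build
 * an anonymous copy of the page with the opcode patched in, and swap it in
 * with __replace_page(). On unregister, if the patched copy ends up
 * byte-identical to the original file page, the anonymous copy is dropped
 * so the mapping can fall back to the page cache. -EAGAIN from the page
 * walk simply restarts the whole sequence.
 */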
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 *   - caller already has refcount taken (and wants an extra one);
 *   - uprobe is RCU protected and won't be freed until after grace period;
 *   - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	kfree(uprobe);
}

static void uprobe_free_srcu(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace);
}
static void uprobe_free_deferred(struct work_struct *work)
{
	struct uprobe *uprobe = container_of(work, struct uprobe, work);

	write_lock(&uprobes_treelock);

	if (uprobe_is_active(uprobe)) {
		write_seqcount_begin(&uprobes_seqcount);
		rb_erase(&uprobe->rb_node, &uprobes_tree);
		write_seqcount_end(&uprobes_seqcount);
	}

	write_unlock(&uprobes_treelock);

	/*
	 * If the application munmap()s the exec vma before
	 * uprobe_unregister() gets called, we don't get a chance to remove
	 * the uprobe from delayed_uprobe_list from remove_breakpoint().
	 * Do it here.
	 */
	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(uprobe, NULL);
	mutex_unlock(&delayed_uprobe_lock);

	/* start srcu -> rcu_tasks_trace -> kfree chain */
	call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu);
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (!refcount_dec_and_test(&uprobe->ref))
		return;

	INIT_WORK(&uprobe->work, uprobe_free_deferred);
	schedule_work(&uprobe->work);
}
/* Initialize hprobe as SRCU-protected "leased" uprobe */
static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx)
{
	WARN_ON(!uprobe);
	hprobe->state = HPROBE_LEASED;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = srcu_idx;
}

/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */
static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe)
{
	hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE;
	hprobe->uprobe = uprobe;
	hprobe->srcu_idx = -1;
}

/*
 * hprobe_consume() fetches hprobe's underlying uprobe and detects whether
 * uprobe is SRCU protected or is refcounted. hprobe_consume() can be
 * used only once for a given hprobe.
 *
 * Caller has to call hprobe_finalize() and pass previous hprobe_state, so
 * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever
 * is appropriate.
 */
static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate)
{
	*hstate = xchg(&hprobe->state, HPROBE_CONSUMED);
	switch (*hstate) {
	case HPROBE_LEASED:
	case HPROBE_STABLE:
		return hprobe->uprobe;
	case HPROBE_GONE:	/* uprobe is NULL, no SRCU */
	case HPROBE_CONSUMED:	/* uprobe was finalized already, do nothing */
		return NULL;
	default:
		WARN(1, "hprobe invalid state %d", *hstate);
		return NULL;
	}
}

/*
 * Reset hprobe state and, if hprobe was LEASED, release SRCU lock.
 * hprobe_finalize() can only be used from current context after
 * hprobe_consume() call (which determines uprobe and hstate value).
 */
static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate)
{
	switch (hstate) {
	case HPROBE_LEASED:
		__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
		break;
	case HPROBE_STABLE:
		put_uprobe(hprobe->uprobe);
		break;
	case HPROBE_GONE:
	case HPROBE_CONSUMED:
		break;
	default:
		WARN(1, "hprobe invalid state %d", hstate);
		break;
	}
}
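/*
 * Minimal usage sketch of the consume/finalize pairing described above
 * (illustrative only; see free_ret_instance() and the uretprobe handling
 * code for the real callers; handle() is a hypothetical stand-in):
 *
 *	enum hprobe_state hstate;
 *	struct uprobe *uprobe;
 *
 *	uprobe = hprobe_consume(&ri->hprobe, &hstate);
 *	if (uprobe)
 *		handle(uprobe);
 *	hprobe_finalize(&ri->hprobe, hstate);
 */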
/*
 * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED)
 * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of
 * them can win the race to perform SRCU unlocking. Whoever wins must perform
 * SRCU unlock.
 *
 * Returns underlying valid uprobe or NULL, if there was no underlying uprobe
 * to begin with or we failed to bump its refcount and it's going away.
 *
 * Returned non-NULL uprobe can be still safely used within an ongoing SRCU
 * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has
 * an extra refcount for caller to assume and use. Otherwise, it's not
 * guaranteed that returned uprobe has a positive refcount, so caller has to
 * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current
 * SRCU lock region. See dup_utask().
 */
static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
{
	enum hprobe_state hstate;

	/*
	 * return_instance's hprobe is protected by RCU.
	 * Underlying uprobe is itself protected from reuse by SRCU.
	 */
	lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));

	hstate = READ_ONCE(hprobe->state);
	switch (hstate) {
	case HPROBE_STABLE:
		/* uprobe has positive refcount, bump refcount, if necessary */
		return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe;
	case HPROBE_GONE:
		/*
		 * SRCU was unlocked earlier and we didn't manage to take
		 * uprobe refcnt, so it's effectively NULL
		 */
		return NULL;
	case HPROBE_CONSUMED:
		/*
		 * uprobe was consumed, so it's effectively NULL as far as
		 * uretprobe processing logic is concerned
		 */
		return NULL;
	case HPROBE_LEASED: {
		struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe);
		/*
		 * Try to switch hprobe state, guarding against
		 * hprobe_consume() or another hprobe_expire() racing with us.
		 * Note, if we failed to get uprobe refcount, we use special
		 * HPROBE_GONE state to signal that hprobe->uprobe shouldn't
		 * be used as it will be freed after SRCU is unlocked.
		 */
		if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) {
			/* We won the race, we are the ones to unlock SRCU */
			__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
			return get ? get_uprobe(uprobe) : uprobe;
		}

		/*
		 * We lost the race, undo refcount bump (if it ever happened),
		 * unless caller would like an extra refcount anyways.
		 */
		if (uprobe && !get)
			put_uprobe(uprobe);
		/*
		 * Even if hprobe_consume() or another hprobe_expire() wins
		 * the state update race and unlocks SRCU from under us, we
		 * still have a guarantee that the underlying uprobe won't be
		 * freed due to ongoing caller's SRCU lock region, so we can
		 * return it regardless. Also, if `get` was true, we also have
		 * an extra ref for the caller to own. This is used in dup_utask().
		 */
		return uprobe;
	}
	default:
		WARN(1, "unknown hprobe state %d", hstate);
		return NULL;
	}
}
static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}
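/*
 * In other words, uprobes_tree is ordered lexicographically by the
 * (inode, offset) pair: all probes on the same inode are adjacent in the
 * tree, sorted by file offset. find_node_in_range() and build_probe_list()
 * below rely on this to enumerate every probe of an inode that falls
 * inside a given vma.
 */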
#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside RCU protected region.
 * No refcount is taken on returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(rcu_read_lock_trace_held());

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}
/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If uprobe already exists (for given inode+offset), we just increment
 * refcount of previously existing uprobe.
 *
 * If not, a provided new instance of uprobe is inserted into the tree (with
 * assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * Caller has to clean up new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	static atomic64_t id;

	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	uc->id = (__u64) atomic64_inc_return(&id);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with a consumer that's not part of
 * @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
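/*
 * Worked example for the chunking in copy_insn() (hypothetical numbers):
 * with 4KiB pages, a 16-byte insn buffer and a probe at file offset
 * 0x1ffc, the first iteration copies len = 4096 - 0xffc = 4 bytes from
 * the tail of the first page, and the second iteration copies the
 * remaining 12 bytes from the start of the next page. The loop exists
 * precisely so an instruction image may straddle a page boundary.
 */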
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		ret = consumer_filter(uc, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}
struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;
		/*
		 * We take mmap_lock for writing to avoid the race with
		 * find_active_uprobe_rcu() which takes mmap_lock for reading.
		 * Thus this install_breakpoint() can not make
		 * is_trap_at_addr() true right after find_uprobe_rcu()
		 * returns NULL in find_active_uprobe_rcu().
		 */
		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

unlock:
		mmap_write_unlock(mm);
free:
		mmput(mm);
		info = free_map_info(info);
	}
out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}
/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, a fast and
	 * unlucky enough caller can free consumer's memory and cause
	 * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
	 */
	synchronize_rcu_tasks_trace();
	synchronize_srcu(&uretprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
 */
struct uprobe *uprobe_register(struct inode *inode,
				loff_t offset, loff_t ref_ctr_offset,
				struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return ERR_PTR(-EINVAL);

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return ERR_PTR(-EIO);
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return ERR_PTR(-EINVAL);

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return ERR_PTR(-EINVAL);
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return ERR_PTR(-EINVAL);

	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (IS_ERR(uprobe))
		return uprobe;

	down_write(&uprobe->register_rwsem);
	consumer_add(uprobe, uc);
	ret = register_for_each_vma(uprobe, uc);
	up_write(&uprobe->register_rwsem);

	if (ret) {
		uprobe_unregister_nosync(uprobe, uc);
		/*
		 * Registration might have partially succeeded, so we can have
		 * this consumer being called right at this time. We need to
		 * sync here. That's OK, this is an unlikely slow path.
		 */
		uprobe_unregister_sync();
		return ERR_PTR(ret);
	}

	return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);
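/*
 * Minimal usage sketch of the API above (illustrative only, not taken from
 * an in-tree caller; error handling trimmed, and the exact handler
 * signature varies across kernel versions). A caller supplies its own
 * uprobe_consumer with at least one handler set, registers it against an
 * inode:offset, and later tears it down in two steps:
 *
 *	static int my_handler(struct uprobe_consumer *uc,
 *			      struct pt_regs *regs, __u64 *data)
 *	{
 *		return 0;	// hypothetical consumer: keep the probe
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	uprobe = uprobe_register(inode, offset, 0, &my_uc);
 *	...
 *	uprobe_unregister_nosync(uprobe, &my_uc);
 *	uprobe_unregister_sync();	// wait out RCU/SRCU readers
 */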
/**
 * uprobe_apply - add or remove the breakpoints according to @uc->filter
 * @uprobe: uprobe which "owns" the breakpoint
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 * Return: 0 on success or negative error code.
 */
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	down_write(&uprobe->register_rwsem);

	rcu_read_lock_trace();
	list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		if (con == uc) {
			ret = register_for_each_vma(uprobe, add ? uc : NULL);
			break;
		}
	}
	rcu_read_unlock_trace();

	up_write(&uprobe->register_rwsem);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset <  offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			/* if uprobe went away, it's safe to ignore it */
			if (try_get_uprobe(u))
				list_add(&u->pending_list, head);
		}
	}
	read_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}
/*
 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	read_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	read_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
			    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;

	vmf->page = area->page;
	get_page(vmf->page);
	return 0;
}

static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return -EPERM;
}

static const struct vm_special_mapping xol_mapping = {
	.name = "[uprobes]",
	.fault = xol_fault,
	.mremap = xol_mremap,
};

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
	mmap_write_unlock(mm);

	return ret;
}

void * __weak arch_uprobe_trampoline(unsigned long *psize)
{
	static uprobe_opcode_t insn = UPROBE_SWBP_INSN;

	*psize = UPROBE_SWBP_INSN_SIZE;
	return &insn;
}
static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long insns_size;
	struct xol_area *area;
	void *insns;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
	if (!area->page)
		goto free_bitmap;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	insns = arch_uprobe_trampoline(&insns_size);
	arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->page);
free_bitmap:
	kfree(area->bitmap);
free_area:
	kfree(area);
out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}
static unsigned long xol_get_slot_nr(struct xol_area *area)
{
	unsigned long slot_nr;

	slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
	if (slot_nr < UINSNS_PER_PAGE) {
		if (!test_and_set_bit(slot_nr, area->bitmap))
			return slot_nr;
	}

	return UINSNS_PER_PAGE;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 */
static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
{
	struct xol_area *area = get_xol_area();
	unsigned long slot_nr;

	if (!area)
		return false;

	wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);

	utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
	arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
	return true;
}
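/*
 * Slot address arithmetic, with hypothetical numbers: if the XOL vma sits
 * at area->vaddr = 0x7fff0000 and slots are 128 bytes, slot 5 maps to
 * xol_vaddr = 0x7fff0000 + 5 * 128 = 0x7fff0280, and xol_free_insn_slot()
 * below recovers slot_nr = (0x7fff0280 - 0x7fff0000) / 128 = 5.
 */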
/*
 * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
 */
static void xol_free_insn_slot(struct uprobe_task *utask)
{
	struct xol_area *area = current->mm->uprobes_state.xol_area;
	unsigned long offset = utask->xol_vaddr - area->vaddr;
	unsigned int slot_nr;

	utask->xol_vaddr = 0;
	/* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
	if (WARN_ON_ONCE(offset >= PAGE_SIZE))
		return;

	slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
	clear_bit(slot_nr, area->bitmap);
	smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
	if (waitqueue_active(&area->wq))
		wake_up(&area->wq);
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
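/*
 * For instance (architecture-specific, given here only as an
 * illustration): on an arch whose breakpoint instruction is one byte,
 * such as x86's int3, the CPU reports an instruction pointer just past
 * the trap, so the probed address is simply regs->ip - 1.
 */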
unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri, bool cleanup_hprobe)
{
	struct return_instance *next = ri->next;

	if (cleanup_hprobe) {
		enum hprobe_state hstate;

		(void)hprobe_consume(&ri->hprobe, &hstate);
		hprobe_finalize(&ri->hprobe, hstate);
	}

	kfree_rcu(ri, rcu);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);

	timer_delete_sync(&utask->ri_timer);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri, true /* cleanup_hprobe */);

	kfree(utask);
	t->utask = NULL;
}
#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */

#define for_each_ret_instance_rcu(pos, head) \
	for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next))

static void ri_timer(struct timer_list *timer)
{
	struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer);
	struct return_instance *ri;

	/* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */
	guard(srcu)(&uretprobes_srcu);
	/* RCU protects return_instance from freeing. */
	guard(rcu)();

	for_each_ret_instance_rcu(ri, utask->return_instances)
		hprobe_expire(&ri->hprobe, false);
}

static struct uprobe_task *alloc_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof(*utask), GFP_KERNEL);
	if (!utask)
		return NULL;

	timer_setup(&utask->ri_timer, ri_timer, 0);

	return utask;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = alloc_utask();
	return current->utask;
}

static size_t ri_size(int consumers_cnt)
{
	struct return_instance *ri;

	return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt;
}

#define DEF_CNT 4

static struct return_instance *alloc_return_instance(void)
{
	struct return_instance *ri;

	ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL);
	if (!ri)
		return ZERO_SIZE_PTR;

	ri->consumers_cnt = DEF_CNT;
	return ri;
}

static struct return_instance *dup_return_instance(struct return_instance *old)
{
	size_t size = ri_size(old->consumers_cnt);

	return kmemdup(old, size, GFP_KERNEL);
}
static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;
	struct uprobe *uprobe;

	n_utask = alloc_utask();
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	/* protect uprobes from freeing, we'll need to try_get_uprobe() them */
	guard(srcu)(&uretprobes_srcu);

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = dup_return_instance(o);
		if (!n)
			return -ENOMEM;

		/* if uprobe is non-NULL, we'll have an extra refcount for uprobe */
		uprobe = hprobe_expire(&o->hprobe, true);

		/*
		 * New utask will have stable properly refcounted uprobe or
		 * NULL. Even if we failed to get refcounted uprobe, we still
		 * need to preserve full set of return_instances for proper
		 * uretprobe handling and nesting in forked task.
		 */
		hprobe_init_stable(&n->hprobe, uprobe);

		n->next = NULL;
		rcu_assign_pointer(*p, n);
		p = &n->next;

		n_utask->depth++;
	}

	return 0;
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}
/*
 * The current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
unsigned long uprobe_get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
				     struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri, true /* cleanup_hprobe */);
		utask->depth--;
	}
	rcu_assign_pointer(utask->return_instances, ri);
}

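/*
 * Arm a uretprobe for the current function invocation: hijack the return
 * address to point at the trampoline and push @ri onto the task's stack of
 * pending return instances. On any failure @ri is freed and the probe hit
 * proceeds without a uretprobe.
 */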
static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
			      struct return_instance *ri)
{
	struct uprobe_task *utask = current->utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;
	int srcu_idx;

	if (!get_xol_area())
		goto free;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		goto free;
	}

	trampoline_vaddr = uprobe_get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto free;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * keep the original return address of the first caller through all
	 * the subsequent instances. This also makes breakpoint unwrapping
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto free;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	/* __srcu_read_lock() because SRCU lock survives switch to user space */
	srcu_idx = __srcu_read_lock(&uretprobes_srcu);

	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;

	hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx);
	ri->next = utask->return_instances;
	rcu_assign_pointer(utask->return_instances, ri);

	mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD);

	return;
free:
	kfree(ri);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask = current->utask;
	int err;

	if (!try_get_uprobe(uprobe))
		return -EINVAL;

	if (!xol_get_insn_slot(uprobe, utask)) {
		err = -ENOMEM;
		goto err_out;
	}

	utask->vaddr = bp_vaddr;
	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(utask);
		goto err_out;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
err_out:
	put_uprobe(uprobe);
	return err;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc. The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

/* assumes being inside RCU protected region */
static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe_rcu(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}

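/*
 * Record a session consumer's (id, cookie) pair in @ri, growing the
 * consumers array by DEF_CNT entries when it is full. ZERO_SIZE_PTR is
 * propagated as the "allocation failed" marker.
 */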
static struct return_instance *
push_consumer(struct return_instance *ri, int idx, __u64 id, __u64 cookie)
{
	if (unlikely(ri == ZERO_SIZE_PTR))
		return ri;

	if (unlikely(idx >= ri->consumers_cnt)) {
		struct return_instance *old_ri = ri;

		ri->consumers_cnt += DEF_CNT;
		ri = krealloc(old_ri, ri_size(old_ri->consumers_cnt), GFP_KERNEL);
		if (!ri) {
			kfree(old_ri);
			return ZERO_SIZE_PTR;
		}
	}

	ri->consumers[idx].id = id;
	ri->consumers[idx].cookie = cookie;
	return ri;
}

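/*
 * Find the consumer entry matching @id, scanning forward from *iter and
 * advancing *iter past the match so repeated lookups walk the array once.
 */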
static struct return_consumer *
return_consumer_find(struct return_instance *ri, int *iter, int id)
{
	struct return_consumer *ric;
	int idx = *iter;

	for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) {
		if (ric->id == id) {
			*iter = idx + 1;
			return ric;
		}
	}

	return NULL;
}

static bool ignore_ret_handler(int rc)
{
	return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE;
}

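/*
 * Run every consumer's entry handler for this probe hit; consumers that
 * also registered a ret_handler get their cookie recorded and a uretprobe
 * prepared. The uprobe is unapplied from this mm only if every consumer's
 * handler returned UPROBE_HANDLER_REMOVE.
 */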
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	bool has_consumers = false, remove = true;
	struct return_instance *ri = NULL;
	int push_idx = 0;

	current->utask->auprobe = &uprobe->arch;

	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		bool session = uc->handler && uc->ret_handler;
		__u64 cookie = 0;
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs, &cookie);
			WARN(rc < 0 || rc > 2,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		remove &= rc == UPROBE_HANDLER_REMOVE;
		has_consumers = true;

		if (!uc->ret_handler || ignore_ret_handler(rc))
			continue;

		if (!ri)
			ri = alloc_return_instance();

		if (session)
			ri = push_consumer(ri, push_idx++, uc->id, cookie);
	}
	current->utask->auprobe = NULL;

	if (!ZERO_OR_NULL_PTR(ri)) {
		/*
		 * The push_idx value has the final number of return consumers,
		 * and ri->consumers_cnt has number of allocated consumers.
		 */
		ri->consumers_cnt = push_idx;
		prepare_uretprobe(uprobe, regs, ri);
	}

	if (remove && has_consumers) {
		down_read(&uprobe->register_rwsem);

		/* re-check that removal is still required, this time under lock */
		if (!filter_chain(uprobe, current->mm)) {
			WARN_ON(!uprobe_is_active(uprobe));
			unapply_uprobe(uprobe, current->mm);
		}

		up_read(&uprobe->register_rwsem);
	}
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_consumer *ric;
	struct uprobe_consumer *uc;
	int ric_idx = 0;

	/* all consumers unsubscribed meanwhile */
	if (unlikely(!uprobe))
		return;

	rcu_read_lock_trace();
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
		bool session = uc->handler && uc->ret_handler;

		if (uc->ret_handler) {
			ric = return_consumer_find(ri, &ric_idx, uc->id);
			if (!session || ric)
				uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
		}
	}
	rcu_read_unlock_trace();
}

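/*
 * Skip past all return_instances belonging to the current chain; returns
 * the first instance of the next chain, or NULL at the end of the stack.
 */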
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}

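/*
 * Entry point for hits on the uretprobe trampoline: restore the original
 * return address, invoke ret_handlers for frames that are still alive, and
 * free the consumed return instances.
 */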
void uprobe_handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	struct uprobe *uprobe;
	enum hprobe_state hstate;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			/* pop current instance from the stack of pending return instances,
			 * as it's not pending anymore: we just fixed up original
			 * instruction pointer in regs and are about to call handlers;
			 * this allows fixup_uretprobe_trampoline_entries() to properly fix up
			 * captured stack traces from uretprobe handlers, in which pending
			 * trampoline addresses on the stack are replaced with correct
			 * original return addresses
			 */
			rcu_assign_pointer(utask->return_instances, ri->next);

			uprobe = hprobe_consume(&ri->hprobe, &hstate);
			if (valid)
				handle_uretprobe_chain(ri, uprobe, regs);
			hprobe_finalize(&ri->hprobe, hstate);

			/* We already took care of hprobe, no need to waste more time on that. */
			ri = free_ret_instance(ri, false /* !cleanup_hprobe */);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	return;

sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

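/* Weak stubs below may be overridden by the architecture. */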
bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == uprobe_get_trampoline_vaddr())
		return uprobe_handle_trampoline(regs);

	rcu_read_lock_trace();

	uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		goto out;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (pre_ssout(uprobe, regs, bp_vaddr))
		goto out;

out:
	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
	rcu_read_unlock_trace();
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(utask);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}