// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *  Srikar Dronamraju
 *  Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>  /* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>     /* anon_vma_prepare */
#include <linux/mmu_notifier.h> /* set_pte_at_notify */
#include <linux/swap.h>     /* try_to_free_swap */
#include <linux/ptrace.h>   /* user_enable_single_step */
#include <linux/kdebug.h>   /* notifier mechanism */
#include "../../mm/internal.h"  /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE         (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS        UINSNS_PER_PAGE
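/*
 * Illustrative arithmetic (not from this file): with a 4 KiB PAGE_SIZE
 * and an arch that defines UPROBE_XOL_SLOT_BYTES as 128, this works out
 * to 32 execute-out-of-line slots per page, i.e. at most 32 threads can
 * single-step a probed instruction concurrently per mm.
 */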

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time.  A fine-grained per-inode count would probably be better?
 */
#define no_uprobe_events()  RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);   /* serialize rbtree access */

#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)    (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN    0

struct uprobe {
    struct rb_node      rb_node;    /* node in the rb tree */
    refcount_t      ref;
    struct rw_semaphore register_rwsem;
    struct rw_semaphore consumer_rwsem;
    struct list_head    pending_list;
    struct uprobe_consumer  *consumers;
    struct inode        *inode;     /* Also hold a ref to inode */
    loff_t          offset;
    loff_t          ref_ctr_offset;
    unsigned long       flags;

    /*
     * The generic code assumes that it has two members of unknown type
     * owned by the arch-specific code:
     *
     *  insn -  copy_insn() saves the original instruction here for
     *      arch_uprobe_analyze_insn().
     *
     *  ixol -  potentially modified instruction to execute out of
     *      line, copied to xol_area by xol_get_insn_slot().
     */
    struct arch_uprobe  arch;
};

struct delayed_uprobe {
    struct list_head list;
    struct uprobe *uprobe;
    struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
    wait_queue_head_t       wq;     /* if all slots are busy */
    atomic_t            slot_count; /* number of in-use slots */
    unsigned long           *bitmap;    /* 0 = free slot */

    struct vm_special_mapping   xol_mapping;
    struct page             *pages[2];
    /*
     * We keep the vma's vm_start rather than a pointer to the vma
     * itself.  The probed process or a naughty kernel module could make
     * the vma go away, and we must handle that reasonably gracefully.
     */
    unsigned long           vaddr;      /* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *  - is_register: indicates if we are in register context.
 *  - Return true if the specified virtual address is in an
 *    executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
    vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

    if (is_register)
        flags |= VM_WRITE;

    return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
    return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
    return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
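
/*
 * Worked example (illustrative values only): for a vma with
 * vm_start == 0x400000 and vm_pgoff == 0x10, the mapping starts at file
 * offset 0x10000, so file offset 0x10234 translates to vaddr 0x400234
 * and vice versa.
 */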

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing
 * @new_page: the modified page that replaces @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                struct page *old_page, struct page *new_page)
{
    struct mm_struct *mm = vma->vm_mm;
    DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0);
    int err;
    struct mmu_notifier_range range;

    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
                addr + PAGE_SIZE);

    if (new_page) {
        err = mem_cgroup_charge(page_folio(new_page), vma->vm_mm,
                    GFP_KERNEL);
        if (err)
            return err;
    }

    /* For try_to_free_swap() below */
    lock_page(old_page);

    mmu_notifier_invalidate_range_start(&range);
    err = -EAGAIN;
    if (!page_vma_mapped_walk(&pvmw))
        goto unlock;
    VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

    if (new_page) {
        get_page(new_page);
        page_add_new_anon_rmap(new_page, vma, addr);
        lru_cache_add_inactive_or_unevictable(new_page, vma);
    } else
        /* no new page, just dec_mm_counter for old_page */
        dec_mm_counter(mm, MM_ANONPAGES);

    if (!PageAnon(old_page)) {
        dec_mm_counter(mm, mm_counter_file(old_page));
        inc_mm_counter(mm, MM_ANONPAGES);
    }

    flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
    ptep_clear_flush_notify(vma, addr, pvmw.pte);
    if (new_page)
        set_pte_at_notify(mm, addr, pvmw.pte,
                  mk_pte(new_page, vma->vm_page_prot));

    page_remove_rmap(old_page, vma, false);
    if (!page_mapped(old_page))
        try_to_free_swap(old_page);
    page_vma_mapped_walk_done(&pvmw);
    put_page(old_page);

    err = 0;
 unlock:
    mmu_notifier_invalidate_range_end(&range);
    unlock_page(old_page);
    return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
    return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
    return is_swbp_insn(insn);
}

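/*
 * Both helpers below copy within a single page; __uprobe_register()
 * enforces UPROBE_SWBP_INSN_SIZE alignment of @offset so these copies
 * (and __update_ref_ctr()) can never cross a page boundary.
 */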
static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
    void *kaddr = kmap_atomic(page);
    memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
    kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
    void *kaddr = kmap_atomic(page);
    memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
    kunmap_atomic(kaddr);
}

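/*
 * Returns 1 if the opcode at @vaddr still needs to be written, or 0 if
 * the page already holds the desired state (register: breakpoint
 * already present; unregister: no breakpoint was installed by us), in
 * which case the caller skips the write.
 */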
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
    uprobe_opcode_t old_opcode;
    bool is_swbp;

    /*
     * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
     * We do not check if it is any other 'trap variant' which could
     * be conditional trap instruction such as the one powerpc supports.
     *
     * The logic is that we do not care if the underlying instruction
     * is a trap variant; uprobes always wins over any other (gdb)
     * breakpoint.
     */
    copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
    is_swbp = is_swbp_insn(&old_opcode);

    if (is_swbp_insn(new_opcode)) {
        if (is_swbp)        /* register: already installed? */
            return 0;
    } else {
        if (!is_swbp)       /* unregister: was it changed by us? */
            return 0;
    }

    return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
    struct delayed_uprobe *du;

    list_for_each_entry(du, &delayed_uprobe_list, list)
        if (du->uprobe == uprobe && du->mm == mm)
            return du;
    return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
    struct delayed_uprobe *du;

    if (delayed_uprobe_check(uprobe, mm))
        return 0;

    du = kzalloc(sizeof(*du), GFP_KERNEL);
    if (!du)
        return -ENOMEM;

    du->uprobe = uprobe;
    du->mm = mm;
    list_add(&du->list, &delayed_uprobe_list);
    return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
    if (WARN_ON(!du))
        return;
    list_del(&du->list);
    kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
    struct list_head *pos, *q;
    struct delayed_uprobe *du;

    if (!uprobe && !mm)
        return;

    list_for_each_safe(pos, q, &delayed_uprobe_list) {
        du = list_entry(pos, struct delayed_uprobe, list);

        if (uprobe && du->uprobe != uprobe)
            continue;
        if (mm && du->mm != mm)
            continue;

        delayed_uprobe_delete(du);
    }
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
                  struct vm_area_struct *vma)
{
    unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

    return uprobe->ref_ctr_offset &&
        vma->vm_file &&
        file_inode(vma->vm_file) == uprobe->inode &&
        (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
        vma->vm_start <= vaddr &&
        vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
    struct vm_area_struct *tmp;

    for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
        if (valid_ref_ctr_vma(uprobe, tmp))
            return tmp;

    return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
    void *kaddr;
    struct page *page;
    struct vm_area_struct *vma;
    int ret;
    short *ptr;

    if (!vaddr || !d)
        return -EINVAL;

    ret = get_user_pages_remote(mm, vaddr, 1,
            FOLL_WRITE, &page, &vma, NULL);
    if (unlikely(ret <= 0)) {
        /*
         * We are asking for 1 page. If get_user_pages_remote() fails,
         * it may return 0; in that case we must return an error.
         */
        return ret == 0 ? -EBUSY : ret;
    }

    kaddr = kmap_atomic(page);
    ptr = kaddr + (vaddr & ~PAGE_MASK);

    if (unlikely(*ptr + d < 0)) {
        pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
            "curr val: %d, delta: %d\n", vaddr, *ptr, d);
        ret = -EINVAL;
        goto out;
    }

    *ptr += d;
    ret = 0;
out:
    kunmap_atomic(kaddr);
    put_page(page);
    return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
                struct mm_struct *mm, short d)
{
    pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
        "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
        d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
        (unsigned long long) uprobe->offset,
        (unsigned long long) uprobe->ref_ctr_offset, mm);
}

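/*
 * Update the reference counter for @uprobe in @mm.  If we are
 * incrementing and the counter page is not mapped yet, the update is
 * queued on delayed_uprobe_list and applied later from
 * uprobe_mmap()->delayed_ref_ctr_inc() once the vma shows up.
 */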
static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
              short d)
{
    struct vm_area_struct *rc_vma;
    unsigned long rc_vaddr;
    int ret = 0;

    rc_vma = find_ref_ctr_vma(uprobe, mm);

    if (rc_vma) {
        rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
        ret = __update_ref_ctr(mm, rc_vaddr, d);
        if (ret)
            update_ref_ctr_warn(uprobe, mm, d);

        if (d > 0)
            return ret;
    }

    mutex_lock(&delayed_uprobe_lock);
    if (d > 0)
        ret = delayed_uprobe_add(uprobe, mm);
    else
        delayed_uprobe_remove(uprobe, mm);
    mutex_unlock(&delayed_uprobe_lock);

    return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
            unsigned long vaddr, uprobe_opcode_t opcode)
{
    struct uprobe *uprobe;
    struct page *old_page, *new_page;
    struct vm_area_struct *vma;
    int ret, is_register, ref_ctr_updated = 0;
    bool orig_page_huge = false;
    unsigned int gup_flags = FOLL_FORCE;

    is_register = is_swbp_insn(&opcode);
    uprobe = container_of(auprobe, struct uprobe, arch);

retry:
    if (is_register)
        gup_flags |= FOLL_SPLIT_PMD;
    /* Read the page with vaddr into memory */
    ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
                    &old_page, &vma, NULL);
    if (ret <= 0)
        return ret;

    ret = verify_opcode(old_page, vaddr, &opcode);
    if (ret <= 0)
        goto put_old;

    if (WARN(!is_register && PageCompound(old_page),
         "uprobe unregister should never work on compound page\n")) {
        ret = -EINVAL;
        goto put_old;
    }

    /* We are going to replace the instruction, update ref_ctr. */
    if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
        ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
        if (ret)
            goto put_old;

        ref_ctr_updated = 1;
    }

    ret = 0;
    if (!is_register && !PageAnon(old_page))
        goto put_old;

    ret = anon_vma_prepare(vma);
    if (ret)
        goto put_old;

    ret = -ENOMEM;
    new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
    if (!new_page)
        goto put_old;

    __SetPageUptodate(new_page);
    copy_highpage(new_page, old_page);
    copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

    if (!is_register) {
        struct page *orig_page;
        pgoff_t index;

        VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

        index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
        orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
                      index);

        if (orig_page) {
            if (PageUptodate(orig_page) &&
                pages_identical(new_page, orig_page)) {
                /* let go of new_page */
                put_page(new_page);
                new_page = NULL;

                if (PageCompound(orig_page))
                    orig_page_huge = true;
            }
            put_page(orig_page);
        }
    }

    ret = __replace_page(vma, vaddr, old_page, new_page);
    if (new_page)
        put_page(new_page);
put_old:
    put_page(old_page);

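    /* -EAGAIN from __replace_page(): lost a race with the mapping, retry. */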
    if (unlikely(ret == -EAGAIN))
        goto retry;

    /* Revert the reference counter if the instruction update failed. */
    if (ret && is_register && ref_ctr_updated)
        update_ref_ctr(uprobe, mm, -1);

    /* try to collapse the pmd for a compound page */
    if (!ret && orig_page_huge)
        collapse_pte_mapped_thp(mm, vaddr);

    return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
    return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
    return uprobe_write_opcode(auprobe, mm, vaddr,
            *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
    refcount_inc(&uprobe->ref);
    return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
    if (refcount_dec_and_test(&uprobe->ref)) {
        /*
         * If the application munmap()s the exec vma before
         * uprobe_unregister() gets called, we don't get a chance to
         * remove the uprobe from delayed_uprobe_list in
         * remove_breakpoint(). Do it here.
         */
        mutex_lock(&delayed_uprobe_lock);
        delayed_uprobe_remove(uprobe, NULL);
        mutex_unlock(&delayed_uprobe_lock);
        kfree(uprobe);
    }
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
           const struct uprobe *r)
{
    if (l_inode < r->inode)
        return -1;

    if (l_inode > r->inode)
        return 1;

    if (l_offset < r->offset)
        return -1;

    if (l_offset > r->offset)
        return 1;

    return 0;
}

#define __node_2_uprobe(node) \
    rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
    struct inode *inode;
    loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
    const struct __uprobe_key *a = key;
    return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
    struct uprobe *u = __node_2_uprobe(a);
    return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
    struct __uprobe_key key = {
        .inode = inode,
        .offset = offset,
    };
    struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

    if (node)
        return get_uprobe(__node_2_uprobe(node));

    return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
    struct uprobe *uprobe;

    spin_lock(&uprobes_treelock);
    uprobe = __find_uprobe(inode, offset);
    spin_unlock(&uprobes_treelock);

    return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
    struct rb_node *node;

    node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
    if (node)
        return get_uprobe(__node_2_uprobe(node));

    /* get access + creation ref */
    refcount_set(&uprobe->ref, 2);
    return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *  increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *  get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
    struct uprobe *u;

    spin_lock(&uprobes_treelock);
    u = __insert_uprobe(uprobe);
    spin_unlock(&uprobes_treelock);

    return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
    pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
        "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
        uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
        (unsigned long long) cur_uprobe->ref_ctr_offset,
        (unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
                   loff_t ref_ctr_offset)
{
    struct uprobe *uprobe, *cur_uprobe;

    uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
    if (!uprobe)
        return NULL;

    uprobe->inode = inode;
    uprobe->offset = offset;
    uprobe->ref_ctr_offset = ref_ctr_offset;
    init_rwsem(&uprobe->register_rwsem);
    init_rwsem(&uprobe->consumer_rwsem);

    /* add to uprobes_tree, sorted on inode:offset */
    cur_uprobe = insert_uprobe(uprobe);
    /* a uprobe exists for this inode:offset combination */
    if (cur_uprobe) {
        if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
            ref_ctr_mismatch_warn(cur_uprobe, uprobe);
            put_uprobe(cur_uprobe);
            kfree(uprobe);
            return ERR_PTR(-EINVAL);
        }
        kfree(uprobe);
        uprobe = cur_uprobe;
    }

    return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
    down_write(&uprobe->consumer_rwsem);
    uc->next = uprobe->consumers;
    uprobe->consumers = uc;
    up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
    struct uprobe_consumer **con;
    bool ret = false;

    down_write(&uprobe->consumer_rwsem);
    for (con = &uprobe->consumers; *con; con = &(*con)->next) {
        if (*con == uc) {
            *con = uc->next;
            ret = true;
            break;
        }
    }
    up_write(&uprobe->consumer_rwsem);

    return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
            void *insn, int nbytes, loff_t offset)
{
    struct page *page;
    /*
     * Ensure that the page that has the original instruction is populated
     * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
     * see uprobe_register().
     */
    if (mapping->a_ops->read_folio)
        page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
    else
        page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
    if (IS_ERR(page))
        return PTR_ERR(page);

    copy_from_page(page, offset, insn, nbytes);
    put_page(page);

    return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
    struct address_space *mapping = uprobe->inode->i_mapping;
    loff_t offs = uprobe->offset;
    void *insn = &uprobe->arch.insn;
    int size = sizeof(uprobe->arch.insn);
    int len, err = -EIO;

    /* Copy only available bytes, -EIO if nothing was read */
    do {
        if (offs >= i_size_read(uprobe->inode))
            break;

        len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
        err = __copy_insn(mapping, filp, insn, len, offs);
        if (err)
            break;

        insn += len;
        offs += len;
        size -= len;
    } while (size);

    return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
                struct mm_struct *mm, unsigned long vaddr)
{
    int ret = 0;

    if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
        return ret;

    /* TODO: move this into _register, until then we abuse this sem. */
    down_write(&uprobe->consumer_rwsem);
    if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
        goto out;

    ret = copy_insn(uprobe, file);
    if (ret)
        goto out;

    ret = -ENOTSUPP;
    if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
        goto out;

    ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
    if (ret)
        goto out;

    smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
    set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
    up_write(&uprobe->consumer_rwsem);

    return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
                   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
    return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
             enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
    struct uprobe_consumer *uc;
    bool ret = false;

    down_read(&uprobe->consumer_rwsem);
    for (uc = uprobe->consumers; uc; uc = uc->next) {
        ret = consumer_filter(uc, ctx, mm);
        if (ret)
            break;
    }
    up_read(&uprobe->consumer_rwsem);

    return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
            struct vm_area_struct *vma, unsigned long vaddr)
{
    bool first_uprobe;
    int ret;

    ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
    if (ret)
        return ret;

    /*
     * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
     * the task can hit this breakpoint right after __replace_page().
     */
    first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
    if (first_uprobe)
        set_bit(MMF_HAS_UPROBES, &mm->flags);

    ret = set_swbp(&uprobe->arch, mm, vaddr);
    if (!ret)
        clear_bit(MMF_RECALC_UPROBES, &mm->flags);
    else if (first_uprobe)
        clear_bit(MMF_HAS_UPROBES, &mm->flags);

    return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
    set_bit(MMF_RECALC_UPROBES, &mm->flags);
    return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
    return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
    if (WARN_ON(!uprobe_is_active(uprobe)))
        return;

    spin_lock(&uprobes_treelock);
    rb_erase(&uprobe->rb_node, &uprobes_tree);
    spin_unlock(&uprobes_treelock);
    RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
    put_uprobe(uprobe);
}

struct map_info {
    struct map_info *next;
    struct mm_struct *mm;
    unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
    struct map_info *next = info->next;
    kfree(info);
    return next;
}

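/*
 * Build the list of mms that map @mapping at @offset.  Allocations
 * under i_mmap_rwsem must be GFP_NOWAIT; if any of them fail we count
 * how many were missed ('more'), drop the lock, allocate the shortfall
 * with GFP_KERNEL and retry the walk from scratch.
 */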
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
    unsigned long pgoff = offset >> PAGE_SHIFT;
    struct vm_area_struct *vma;
    struct map_info *curr = NULL;
    struct map_info *prev = NULL;
    struct map_info *info;
    int more = 0;

 again:
    i_mmap_lock_read(mapping);
    vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
        if (!valid_vma(vma, is_register))
            continue;

        if (!prev && !more) {
            /*
             * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
             * reclaim. This is optimistic, no harm done if it fails.
             */
            prev = kmalloc(sizeof(struct map_info),
                    GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
            if (prev)
                prev->next = NULL;
        }
        if (!prev) {
            more++;
            continue;
        }

        if (!mmget_not_zero(vma->vm_mm))
            continue;

        info = prev;
        prev = prev->next;
        info->next = curr;
        curr = info;

        info->mm = vma->vm_mm;
        info->vaddr = offset_to_vaddr(vma, offset);
    }
    i_mmap_unlock_read(mapping);

    if (!more)
        goto out;

    prev = curr;
    while (curr) {
        mmput(curr->mm);
        curr = curr->next;
    }

    do {
        info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
        if (!info) {
            curr = ERR_PTR(-ENOMEM);
            goto out;
        }
        info->next = prev;
        prev = info;
    } while (--more);

    goto again;
 out:
    while (prev)
        prev = free_map_info(prev);
    return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
    bool is_register = !!new;
    struct map_info *info;
    int err = 0;

    percpu_down_write(&dup_mmap_sem);
    info = build_map_info(uprobe->inode->i_mapping,
                    uprobe->offset, is_register);
    if (IS_ERR(info)) {
        err = PTR_ERR(info);
        goto out;
    }

    while (info) {
        struct mm_struct *mm = info->mm;
        struct vm_area_struct *vma;

        if (err && is_register)
            goto free;

        mmap_write_lock(mm);
        vma = find_vma(mm, info->vaddr);
        if (!vma || !valid_vma(vma, is_register) ||
            file_inode(vma->vm_file) != uprobe->inode)
            goto unlock;

        if (vma->vm_start > info->vaddr ||
            vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
            goto unlock;

        if (is_register) {
            /* consult only the "caller", new consumer. */
            if (consumer_filter(new,
                    UPROBE_FILTER_REGISTER, mm))
                err = install_breakpoint(uprobe, mm, vma, info->vaddr);
        } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
            if (!filter_chain(uprobe,
                    UPROBE_FILTER_UNREGISTER, mm))
                err |= remove_breakpoint(uprobe, mm, info->vaddr);
        }

 unlock:
        mmap_write_unlock(mm);
 free:
        mmput(mm);
        info = free_map_info(info);
    }
 out:
    percpu_up_write(&dup_mmap_sem);
    return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
    int err;

    if (WARN_ON(!consumer_del(uprobe, uc)))
        return;

    err = register_for_each_vma(uprobe, NULL);
    /* TODO: can't unregister? schedule a worker thread */
    if (!uprobe->consumers && !err)
        delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
    struct uprobe *uprobe;

    uprobe = find_uprobe(inode, offset);
    if (WARN_ON(!uprobe))
        return;

    down_write(&uprobe->register_rwsem);
    __uprobe_unregister(uprobe, uc);
    up_write(&uprobe->register_rwsem);
    put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
                 loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
    struct uprobe *uprobe;
    int ret;

    /* Uprobe must have at least one set consumer */
    if (!uc->handler && !uc->ret_handler)
        return -EINVAL;

    /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
    if (!inode->i_mapping->a_ops->read_folio &&
        !shmem_mapping(inode->i_mapping))
        return -EIO;
    /* Racy, just to catch the obvious mistakes */
    if (offset > i_size_read(inode))
        return -EINVAL;

    /*
     * This ensures that copy_from_page(), copy_to_page() and
     * __update_ref_ctr() can't cross page boundary.
     */
    if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
        return -EINVAL;
    if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
        return -EINVAL;

 retry:
    uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
    if (!uprobe)
        return -ENOMEM;
    if (IS_ERR(uprobe))
        return PTR_ERR(uprobe);

    /*
     * We can race with uprobe_unregister()->delete_uprobe().
     * Check uprobe_is_active() and retry if it is false.
     */
    down_write(&uprobe->register_rwsem);
    ret = -EAGAIN;
    if (likely(uprobe_is_active(uprobe))) {
        consumer_add(uprobe, uc);
        ret = register_for_each_vma(uprobe, uc);
        if (ret)
            __uprobe_unregister(uprobe, uc);
    }
    up_write(&uprobe->register_rwsem);
    put_uprobe(uprobe);

    if (unlikely(ret == -EAGAIN))
        goto retry;
    return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
            struct uprobe_consumer *uc)
{
    return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);
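
/*
 * Illustrative usage sketch (not part of this file; names are
 * hypothetical): an in-kernel user fills a struct uprobe_consumer and
 * registers it against an inode:offset pair, e.g.
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		// called in task context when the breakpoint is hit
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */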

int uprobe_register_refctr(struct inode *inode, loff_t offset,
               loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
    return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);

/*
 * uprobe_apply - add or remove the breakpoints for a registered probe.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
            struct uprobe_consumer *uc, bool add)
{
    struct uprobe *uprobe;
    struct uprobe_consumer *con;
    int ret = -ENOENT;

    uprobe = find_uprobe(inode, offset);
    if (WARN_ON(!uprobe))
        return ret;

    down_write(&uprobe->register_rwsem);
    for (con = uprobe->consumers; con && con != uc ; con = con->next)
        ;
    if (con)
        ret = register_for_each_vma(uprobe, add ? uc : NULL);
    up_write(&uprobe->register_rwsem);
    put_uprobe(uprobe);

    return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
    struct vm_area_struct *vma;
    int err = 0;

    mmap_read_lock(mm);
    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        unsigned long vaddr;
        loff_t offset;

        if (!valid_vma(vma, false) ||
            file_inode(vma->vm_file) != uprobe->inode)
            continue;

        offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        if (uprobe->offset <  offset ||
            uprobe->offset >= offset + vma->vm_end - vma->vm_start)
            continue;

        vaddr = offset_to_vaddr(vma, uprobe->offset);
        err |= remove_breakpoint(uprobe, mm, vaddr);
    }
    mmap_read_unlock(mm);

    return err;
}

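/*
 * Return any node whose uprobe lies in [min, max] for @inode, or NULL.
 * The tree is ordered by inode:offset, so build_probe_list() walks
 * rb_prev()/rb_next() from the returned node to collect the full range.
 */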
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
    struct rb_node *n = uprobes_tree.rb_node;

    while (n) {
        struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

        if (inode < u->inode) {
            n = n->rb_left;
        } else if (inode > u->inode) {
            n = n->rb_right;
        } else {
            if (max < u->offset)
                n = n->rb_left;
            else if (min > u->offset)
                n = n->rb_right;
            else
                break;
        }
    }

    return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
                struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct list_head *head)
{
    loff_t min, max;
    struct rb_node *n, *t;
    struct uprobe *u;

    INIT_LIST_HEAD(head);
    min = vaddr_to_offset(vma, start);
    max = min + (end - start) - 1;

    spin_lock(&uprobes_treelock);
    n = find_node_in_range(inode, min, max);
    if (n) {
        for (t = n; t; t = rb_prev(t)) {
            u = rb_entry(t, struct uprobe, rb_node);
            if (u->inode != inode || u->offset < min)
                break;
            list_add(&u->pending_list, head);
            get_uprobe(u);
        }
        for (t = n; (t = rb_next(t)); ) {
            u = rb_entry(t, struct uprobe, rb_node);
            if (u->inode != inode || u->offset > max)
                break;
            list_add(&u->pending_list, head);
            get_uprobe(u);
        }
    }
    spin_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
    struct list_head *pos, *q;
    struct delayed_uprobe *du;
    unsigned long vaddr;
    int ret = 0, err = 0;

    mutex_lock(&delayed_uprobe_lock);
    list_for_each_safe(pos, q, &delayed_uprobe_list) {
        du = list_entry(pos, struct delayed_uprobe, list);

        if (du->mm != vma->vm_mm ||
            !valid_ref_ctr_vma(du->uprobe, vma))
            continue;

        vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
        ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
        if (ret) {
            update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
            if (!err)
                err = ret;
        }
        delayed_uprobe_delete(du);
    }
    mutex_unlock(&delayed_uprobe_lock);
    return err;
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0; the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
    struct list_head tmp_list;
    struct uprobe *uprobe, *u;
    struct inode *inode;

    if (no_uprobe_events())
        return 0;

    if (vma->vm_file &&
        (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
        test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
        delayed_ref_ctr_inc(vma);

    if (!valid_vma(vma, true))
        return 0;

    inode = file_inode(vma->vm_file);
    if (!inode)
        return 0;

    mutex_lock(uprobes_mmap_hash(inode));
    build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
    /*
     * We can race with uprobe_unregister(); this uprobe can already be
     * removed. But in this case filter_chain() must return false, as
     * all consumers have gone away.
     */
    list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
        if (!fatal_signal_pending(current) &&
            filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
            unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
            install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
        }
        put_uprobe(uprobe);
    }
    mutex_unlock(uprobes_mmap_hash(inode));

    return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
    loff_t min, max;
    struct inode *inode;
    struct rb_node *n;

    inode = file_inode(vma->vm_file);

    min = vaddr_to_offset(vma, start);
    max = min + (end - start) - 1;

    spin_lock(&uprobes_treelock);
    n = find_node_in_range(inode, min, max);
    spin_unlock(&uprobes_treelock);

    return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
    if (no_uprobe_events() || !valid_vma(vma, false))
        return;

    if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
        return;

    if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
         test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
        return;

    if (vma_has_uprobes(vma, start, end))
        set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
    struct vm_area_struct *vma;
    int ret;

    if (mmap_write_lock_killable(mm))
        return -EINTR;

    if (mm->uprobes_state.xol_area) {
        ret = -EALREADY;
        goto fail;
    }

    if (!area->vaddr) {
        /* Try to map as high as possible; this is only a hint. */
        area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
                        PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(area->vaddr)) {
            ret = area->vaddr;
            goto fail;
        }
    }

    vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
                VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
                &area->xol_mapping);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        goto fail;
    }

    ret = 0;
    /* pairs with get_xol_area() */
    smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
    mmap_write_unlock(mm);

    return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
    struct mm_struct *mm = current->mm;
    uprobe_opcode_t insn = UPROBE_SWBP_INSN;
    struct xol_area *area;

    area = kmalloc(sizeof(*area), GFP_KERNEL);
    if (unlikely(!area))
        goto out;

    area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
                   GFP_KERNEL);
    if (!area->bitmap)
        goto free_area;

    area->xol_mapping.name = "[uprobes]";
    area->xol_mapping.fault = NULL;
    area->xol_mapping.pages = area->pages;
    area->pages[0] = alloc_page(GFP_HIGHUSER);
    if (!area->pages[0])
        goto free_bitmap;
    area->pages[1] = NULL;

    area->vaddr = vaddr;
    init_waitqueue_head(&area->wq);
    /* Reserve the 1st slot for get_trampoline_vaddr() */
    set_bit(0, area->bitmap);
    atomic_set(&area->slot_count, 1);
    arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

    if (!xol_add_vma(mm, area))
        return area;

    __free_page(area->pages[0]);
 free_bitmap:
    kfree(area->bitmap);
 free_area:
    kfree(area);
 out:
    return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
    struct mm_struct *mm = current->mm;
    struct xol_area *area;

    if (!mm->uprobes_state.xol_area)
        __create_xol_area(0);

    /* Pairs with xol_add_vma() smp_store_release() */
    area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
    return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
    struct xol_area *area = mm->uprobes_state.xol_area;

    mutex_lock(&delayed_uprobe_lock);
    delayed_uprobe_remove(NULL, mm);
    mutex_unlock(&delayed_uprobe_lock);

    if (!area)
        return;

    put_page(area->pages[0]);
    kfree(area->bitmap);
    kfree(area);
}

void uprobe_start_dup_mmap(void)
{
    percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
    percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
    if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
        set_bit(MMF_HAS_UPROBES, &newmm->flags);
        /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
        set_bit(MMF_RECALC_UPROBES, &newmm->flags);
    }
}

/*
 * Search for a free slot; if they are all busy, sleep on area->wq
 * until one is released.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
    unsigned long slot_addr;
    int slot_nr;

    do {
        slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
        if (slot_nr < UINSNS_PER_PAGE) {
            if (!test_and_set_bit(slot_nr, area->bitmap))
                break;

            slot_nr = UINSNS_PER_PAGE;
            continue;
        }
        wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
    } while (slot_nr >= UINSNS_PER_PAGE);

    slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
    atomic_inc(&area->slot_count);

    return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
    struct xol_area *area;
    unsigned long xol_vaddr;

    area = get_xol_area();
    if (!area)
        return 0;

    xol_vaddr = xol_take_insn_slot(area);
    if (unlikely(!xol_vaddr))
        return 0;

    arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
                  &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

    return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
    struct xol_area *area;
    unsigned long vma_end;
    unsigned long slot_addr;

    if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
        return;

    slot_addr = tsk->utask->xol_vaddr;
    if (unlikely(!slot_addr))
        return;

    area = tsk->mm->uprobes_state.xol_area;
    vma_end = area->vaddr + PAGE_SIZE;
    if (area->vaddr <= slot_addr && slot_addr < vma_end) {
        unsigned long offset;
        int slot_nr;

        offset = slot_addr - area->vaddr;
        slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
        if (slot_nr >= UINSNS_PER_PAGE)
            return;

        clear_bit(slot_nr, area->bitmap);
        atomic_dec(&area->slot_count);
        smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
        if (waitqueue_active(&area->wq))
            wake_up(&area->wq);

        tsk->utask->xol_vaddr = 0;
    }
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                  void *src, unsigned long len)
{
    /* Initialize the slot */
    copy_to_page(page, vaddr, src, len);

    /*
     * We probably need flush_icache_user_page() but it needs vma.
     * This should work on most architectures by default. If an
     * architecture needs to do something different it can define
     * its own version of the function.
     */
    flush_dcache_page(page);
}

1678 /**
1679  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1680  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1681  * instruction.
1682  * Return the address of the breakpoint instruction.
1683  */
1684 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1685 {
1686     return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1687 }
1688 
1689 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1690 {
1691     struct uprobe_task *utask = current->utask;
1692 
1693     if (unlikely(utask && utask->active_uprobe))
1694         return utask->vaddr;
1695 
1696     return instruction_pointer(regs);
1697 }
1698 
1699 static struct return_instance *free_ret_instance(struct return_instance *ri)
1700 {
1701     struct return_instance *next = ri->next;
1702     put_uprobe(ri->uprobe);
1703     kfree(ri);
1704     return next;
1705 }
1706 
1707 /*
1708  * Called with no locks held.
1709  * Called in context of an exiting or an exec-ing thread.
1710  */
1711 void uprobe_free_utask(struct task_struct *t)
1712 {
1713     struct uprobe_task *utask = t->utask;
1714     struct return_instance *ri;
1715 
1716     if (!utask)
1717         return;
1718 
1719     if (utask->active_uprobe)
1720         put_uprobe(utask->active_uprobe);
1721 
1722     ri = utask->return_instances;
1723     while (ri)
1724         ri = free_ret_instance(ri);
1725 
1726     xol_free_insn_slot(t);
1727     kfree(utask);
1728     t->utask = NULL;
1729 }
1730 
1731 /*
1732  * Allocate a uprobe_task object for the task if necessary.
1733  * Called when the thread hits a breakpoint.
1734  *
1735  * Returns:
1736  * - pointer to new uprobe_task on success
1737  * - NULL otherwise
1738  */
1739 static struct uprobe_task *get_utask(void)
1740 {
1741     if (!current->utask)
1742         current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1743     return current->utask;
1744 }
1745 
1746 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1747 {
1748     struct uprobe_task *n_utask;
1749     struct return_instance **p, *o, *n;
1750 
1751     n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1752     if (!n_utask)
1753         return -ENOMEM;
1754     t->utask = n_utask;
1755 
1756     p = &n_utask->return_instances;
1757     for (o = o_utask->return_instances; o; o = o->next) {
1758         n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1759         if (!n)
1760             return -ENOMEM;
1761 
1762         *n = *o;
1763         get_uprobe(n->uprobe);
1764         n->next = NULL;
1765 
1766         *p = n;
1767         p = &n->next;
1768         n_utask->depth++;
1769     }
1770 
1771     return 0;
1772 }
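
/*
 * Illustrative note, not from this file: dup_utask() above uses the
 * classic tail-pointer idiom -- 'p' always addresses the link field where
 * the next copied node must be stored -- so the child's return_instances
 * chain preserves the parent's order.  Each copy takes its own reference
 * via get_uprobe(); if kmalloc() fails midway, the partially built chain
 * is freed later through uprobe_free_utask() when the child exits.
 */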
1773 
1774 static void uprobe_warn(struct task_struct *t, const char *msg)
1775 {
1776     pr_warn("uprobe: %s:%d failed to %s\n",
1777             current->comm, current->pid, msg);
1778 }
1779 
1780 static void dup_xol_work(struct callback_head *work)
1781 {
1782     if (current->flags & PF_EXITING)
1783         return;
1784 
1785     if (!__create_xol_area(current->utask->dup_xol_addr) &&
1786             !fatal_signal_pending(current))
1787         uprobe_warn(current, "dup xol area");
1788 }
1789 
1790 /*
1791  * Called in context of a new clone/fork from copy_process.
1792  */
1793 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1794 {
1795     struct uprobe_task *utask = current->utask;
1796     struct mm_struct *mm = current->mm;
1797     struct xol_area *area;
1798 
1799     t->utask = NULL;
1800 
1801     if (!utask || !utask->return_instances)
1802         return;
1803 
1804     if (mm == t->mm && !(flags & CLONE_VFORK))
1805         return;
1806 
1807     if (dup_utask(t, utask))
1808         return uprobe_warn(t, "dup ret instances");
1809 
1810     /* The task can fork() after dup_xol_work() fails */
1811     area = mm->uprobes_state.xol_area;
1812     if (!area)
1813         return uprobe_warn(t, "dup xol area");
1814 
1815     if (mm == t->mm)
1816         return;
1817 
1818     t->utask->dup_xol_addr = area->vaddr;
1819     init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1820     task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
1821 }
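
/*
 * Illustrative note, not from this file: the checks above split into
 * three cases.  A plain thread clone (same mm, no CLONE_VFORK) shares
 * the xol area and uretprobe state, so nothing is copied.  A vfork()
 * child (same mm, CLONE_VFORK set) gets its own copy of the
 * return_instances chain but keeps using the parent's xol area.  A full
 * fork() (different mm) also needs its own xol area, which can only be
 * mapped from the child's own context, hence the deferred dup_xol_work()
 * scheduled via task_work_add().
 */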
1822 
1823 /*
1824  * The current area->vaddr notion assumes the trampoline address is
1825  * always equal to area->vaddr.
1826  *
1827  * Returns -1 in case the xol_area is not allocated.
1828  */
1829 static unsigned long get_trampoline_vaddr(void)
1830 {
1831     struct xol_area *area;
1832     unsigned long trampoline_vaddr = -1;
1833 
1834     /* Pairs with xol_add_vma() smp_store_release() */
1835     area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1836     if (area)
1837         trampoline_vaddr = area->vaddr;
1838 
1839     return trampoline_vaddr;
1840 }
1841 
1842 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1843                     struct pt_regs *regs)
1844 {
1845     struct return_instance *ri = utask->return_instances;
1846     enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1847 
1848     while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1849         ri = free_ret_instance(ri);
1850         utask->depth--;
1851     }
1852     utask->return_instances = ri;
1853 }
1854 
1855 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1856 {
1857     struct return_instance *ri;
1858     struct uprobe_task *utask;
1859     unsigned long orig_ret_vaddr, trampoline_vaddr;
1860     bool chained;
1861 
1862     if (!get_xol_area())
1863         return;
1864 
1865     utask = get_utask();
1866     if (!utask)
1867         return;
1868 
1869     if (utask->depth >= MAX_URETPROBE_DEPTH) {
1870         printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1871                 " nestedness limit pid/tgid=%d/%d\n",
1872                 current->pid, current->tgid);
1873         return;
1874     }
1875 
1876     ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1877     if (!ri)
1878         return;
1879 
1880     trampoline_vaddr = get_trampoline_vaddr();
1881     orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1882     if (orig_ret_vaddr == -1)
1883         goto fail;
1884 
1885     /* drop the entries invalidated by longjmp() */
1886     chained = (orig_ret_vaddr == trampoline_vaddr);
1887     cleanup_return_instances(utask, chained, regs);
1888 
1889     /*
1890      * We don't want to keep the trampoline address on the stack; rather, we
1891      * keep the original return address of the first caller through all the
1892      * subsequent instances. This also makes breakpoint unwrapping easier.
1893      */
1894     if (chained) {
1895         if (!utask->return_instances) {
1896             /*
1897              * This situation is not possible. Likely we have an
1898              * attack from user-space.
1899              */
1900             uprobe_warn(current, "handle tail call");
1901             goto fail;
1902         }
1903         orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1904     }
1905 
1906     ri->uprobe = get_uprobe(uprobe);
1907     ri->func = instruction_pointer(regs);
1908     ri->stack = user_stack_pointer(regs);
1909     ri->orig_ret_vaddr = orig_ret_vaddr;
1910     ri->chained = chained;
1911 
1912     utask->depth++;
1913     ri->next = utask->return_instances;
1914     utask->return_instances = ri;
1915 
1916     return;
1917  fail:
1918     kfree(ri);
1919 }
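
/*
 * Illustrative sketch, not part of this file: a minimal consumer with a
 * ret_handler, which is what makes handler_chain() request
 * prepare_uretprobe() above.  'my_inode' and MY_FUNC_OFFSET are
 * hypothetical placeholders for a real file and the probed function's
 * offset within it.
 */
static int my_ret_handler(struct uprobe_consumer *self,
              unsigned long func, struct pt_regs *regs)
{
    /* 'func' is ri->func: the probed function's entry address */
    pr_info("returned from function at %lx\n", func);
    return 0;
}

static struct uprobe_consumer my_consumer = {
    .ret_handler = my_ret_handler,
};

/* Registration: uprobe_register(my_inode, MY_FUNC_OFFSET, &my_consumer); */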
1920 
1921 /* Prepare to single-step probed instruction out of line. */
1922 static int
1923 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1924 {
1925     struct uprobe_task *utask;
1926     unsigned long xol_vaddr;
1927     int err;
1928 
1929     utask = get_utask();
1930     if (!utask)
1931         return -ENOMEM;
1932 
1933     xol_vaddr = xol_get_insn_slot(uprobe);
1934     if (!xol_vaddr)
1935         return -ENOMEM;
1936 
1937     utask->xol_vaddr = xol_vaddr;
1938     utask->vaddr = bp_vaddr;
1939 
1940     err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1941     if (unlikely(err)) {
1942         xol_free_insn_slot(current);
1943         return err;
1944     }
1945 
1946     utask->active_uprobe = uprobe;
1947     utask->state = UTASK_SSTEP;
1948     return 0;
1949 }
1950 
1951 /*
1952  * If we are singlestepping, then ensure this thread is not connected to
1953  * non-fatal signals until completion of singlestep.  When xol insn itself
1954  * triggers the signal, restart the original insn even if the task is
1955  * already SIGKILL'ed (since the coredump should report the correct ip).  This
1956  * is even more important if the task has a handler for SIGSEGV/etc: the
1957  * _same_ instruction would be repeated after return from the signal
1958  * handler, and SSTEP could never finish in this case.
1959  */
1960 bool uprobe_deny_signal(void)
1961 {
1962     struct task_struct *t = current;
1963     struct uprobe_task *utask = t->utask;
1964 
1965     if (likely(!utask || !utask->active_uprobe))
1966         return false;
1967 
1968     WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1969 
1970     if (task_sigpending(t)) {
1971         spin_lock_irq(&t->sighand->siglock);
1972         clear_tsk_thread_flag(t, TIF_SIGPENDING);
1973         spin_unlock_irq(&t->sighand->siglock);
1974 
1975         if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1976             utask->state = UTASK_SSTEP_TRAPPED;
1977             set_tsk_thread_flag(t, TIF_UPROBE);
1978         }
1979     }
1980 
1981     return true;
1982 }
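
/*
 * Illustrative note, not from this file: the TIF_SIGPENDING flag cleared
 * above is recomputed once the single-step completes -- see the
 * recalc_sigpending() call under siglock in handle_singlestep() below --
 * so deferred non-fatal signals are delivered afterwards rather than lost.
 */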
1983 
1984 static void mmf_recalc_uprobes(struct mm_struct *mm)
1985 {
1986     struct vm_area_struct *vma;
1987 
1988     for (vma = mm->mmap; vma; vma = vma->vm_next) {
1989         if (!valid_vma(vma, false))
1990             continue;
1991         /*
1992          * This is not strictly accurate, we can race with
1993          * uprobe_unregister() and see the already removed
1994          * uprobe if delete_uprobe() was not yet called.
1995          * Or this uprobe can be filtered out.
1996          */
1997         if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1998             return;
1999     }
2000 
2001     clear_bit(MMF_HAS_UPROBES, &mm->flags);
2002 }
2003 
2004 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2005 {
2006     struct page *page;
2007     uprobe_opcode_t opcode;
2008     int result;
2009 
2010     if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
2011         return -EINVAL;
2012 
2013     pagefault_disable();
2014     result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2015     pagefault_enable();
2016 
2017     if (likely(result == 0))
2018         goto out;
2019 
2020     /*
2021      * Fall back to get_user_pages_remote() to fault the page in and
2022      * read the instruction from it.  'mm' *is* current->mm, but we
2023      * treat this as a 'remote' access since it is essentially a
2024      * kernel access to the memory.
2025      */
2026     result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
2027             NULL, NULL);
2028     if (result < 0)
2029         return result;
2030 
2031     copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2032     put_page(page);
2033  out:
2034     /* This needs to return true for any variant of the trap insn */
2035     return is_trap_insn(&opcode);
2036 }
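
/*
 * Illustrative note, not from this file: the __get_user() above is a
 * lockless fast path that succeeds only if the page is already resident;
 * with pagefaults disabled it cannot sleep.  Only on failure do we pay
 * for get_user_pages_remote(), which may fault the page in.
 */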
2037 
2038 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
2039 {
2040     struct mm_struct *mm = current->mm;
2041     struct uprobe *uprobe = NULL;
2042     struct vm_area_struct *vma;
2043 
2044     mmap_read_lock(mm);
2045     vma = vma_lookup(mm, bp_vaddr);
2046     if (vma) {
2047         if (valid_vma(vma, false)) {
2048             struct inode *inode = file_inode(vma->vm_file);
2049             loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2050 
2051             uprobe = find_uprobe(inode, offset);
2052         }
2053 
2054         if (!uprobe)
2055             *is_swbp = is_trap_at_addr(mm, bp_vaddr);
2056     } else {
2057         *is_swbp = -EFAULT;
2058     }
2059 
2060     if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2061         mmf_recalc_uprobes(mm);
2062     mmap_read_unlock(mm);
2063 
2064     return uprobe;
2065 }
2066 
2067 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2068 {
2069     struct uprobe_consumer *uc;
2070     int remove = UPROBE_HANDLER_REMOVE;
2071     bool need_prep = false; /* prepare return uprobe, when needed */
2072 
2073     down_read(&uprobe->register_rwsem);
2074     for (uc = uprobe->consumers; uc; uc = uc->next) {
2075         int rc = 0;
2076 
2077         if (uc->handler) {
2078             rc = uc->handler(uc, regs);
2079             WARN(rc & ~UPROBE_HANDLER_MASK,
2080                 "bad rc=0x%x from %ps()\n", rc, uc->handler);
2081         }
2082 
2083         if (uc->ret_handler)
2084             need_prep = true;
2085 
2086         remove &= rc;
2087     }
2088 
2089     if (need_prep && !remove)
2090         prepare_uretprobe(uprobe, regs); /* put bp at return */
2091 
2092     if (remove && uprobe->consumers) {
2093         WARN_ON(!uprobe_is_active(uprobe));
2094         unapply_uprobe(uprobe, current->mm);
2095     }
2096     up_read(&uprobe->register_rwsem);
2097 }
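
/*
 * Illustrative sketch, not part of this file: because 'remove' above is
 * AND-accumulated across consumers, the breakpoint is unapplied from this
 * mm only if every handler returns UPROBE_HANDLER_REMOVE.  A hypothetical
 * one-shot consumer:
 */
static int one_shot_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
    pr_info("uprobe hit at ip=%lx\n", instruction_pointer(regs));
    return UPROBE_HANDLER_REMOVE;   /* vote to unapply from current->mm */
}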
2098 
2099 static void
2100 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2101 {
2102     struct uprobe *uprobe = ri->uprobe;
2103     struct uprobe_consumer *uc;
2104 
2105     down_read(&uprobe->register_rwsem);
2106     for (uc = uprobe->consumers; uc; uc = uc->next) {
2107         if (uc->ret_handler)
2108             uc->ret_handler(uc, ri->func, regs);
2109     }
2110     up_read(&uprobe->register_rwsem);
2111 }
2112 
2113 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2114 {
2115     bool chained;
2116 
2117     do {
2118         chained = ri->chained;
2119         ri = ri->next;  /* can't be NULL if chained */
2120     } while (chained);
2121 
2122     return ri;
2123 }
2124 
2125 static void handle_trampoline(struct pt_regs *regs)
2126 {
2127     struct uprobe_task *utask;
2128     struct return_instance *ri, *next;
2129     bool valid;
2130 
2131     utask = current->utask;
2132     if (!utask)
2133         goto sigill;
2134 
2135     ri = utask->return_instances;
2136     if (!ri)
2137         goto sigill;
2138 
2139     do {
2140         /*
2141          * We should throw out the frames invalidated by longjmp().
2142          * If this chain is valid, then the next one should be alive
2143          * or NULL; the latter case means that nobody but ri->func
2144          * could hit this trampoline on return. TODO: sigaltstack().
2145          */
2146         next = find_next_ret_chain(ri);
2147         valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
2148 
2149         instruction_pointer_set(regs, ri->orig_ret_vaddr);
2150         do {
2151             if (valid)
2152                 handle_uretprobe_chain(ri, regs);
2153             ri = free_ret_instance(ri);
2154             utask->depth--;
2155         } while (ri != next);
2156     } while (!valid);
2157 
2158     utask->return_instances = ri;
2159     return;
2160 
2161  sigill:
2162     uprobe_warn(current, "handle uretprobe, sending SIGILL.");
2163     force_sig(SIGILL);
2165 }
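
/*
 * Illustrative note, not from this file: a worked example of the loops
 * above.  Let probed function A call probed function B, which tail-calls
 * probed function C; the return_instances stack (newest first) is then
 * C(chained) -> B -> A.  find_next_ret_chain(C) skips the chained frame
 * and returns A's instance.  If A's frame is still alive on the real
 * stack, the C/B chain is valid: both ret_handlers run, both instances
 * are freed, and the ip is set to C's orig_ret_vaddr, the return address
 * into A that was saved when B's return was first hijacked.  Frames
 * skipped over by longjmp() fail arch_uretprobe_is_alive() and are freed
 * without running their handlers.
 */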
2166 
2167 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2168 {
2169     return false;
2170 }
2171 
2172 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2173                     struct pt_regs *regs)
2174 {
2175     return true;
2176 }
2177 
2178 /*
2179  * Run handler and ask thread to singlestep.
2180  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2181  */
2182 static void handle_swbp(struct pt_regs *regs)
2183 {
2184     struct uprobe *uprobe;
2185     unsigned long bp_vaddr;
2186     int is_swbp;
2187 
2188     bp_vaddr = uprobe_get_swbp_addr(regs);
2189     if (bp_vaddr == get_trampoline_vaddr())
2190         return handle_trampoline(regs);
2191 
2192     uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
2193     if (!uprobe) {
2194         if (is_swbp > 0) {
2195             /* No matching uprobe; signal SIGTRAP. */
2196             force_sig(SIGTRAP);
2197         } else {
2198             /*
2199              * Either we raced with uprobe_unregister() or we can't
2200              * access this memory. The latter is only possible if
2201              * another thread plays with our ->mm. In both cases
2202              * we can simply restart. If this vma was unmapped we
2203              * can pretend this insn was not executed yet and get
2204              * the (correct) SIGSEGV after restart.
2205              */
2206             instruction_pointer_set(regs, bp_vaddr);
2207         }
2208         return;
2209     }
2210 
2211     /* change it in advance for ->handler() and restart */
2212     instruction_pointer_set(regs, bp_vaddr);
2213 
2214     /*
2215      * TODO: move copy_insn/etc into _register and remove this hack.
2216      * After we hit the bp, _unregister + _register can install the
2217      * new and not-yet-analyzed uprobe at the same address, restart.
2218      */
2219     if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2220         goto out;
2221 
2222     /*
2223      * Pairs with the smp_wmb() in prepare_uprobe().
2224      *
2225      * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2226      * we must also see the stores to &uprobe->arch performed by the
2227      * prepare_uprobe() call.
2228      */
2229     smp_rmb();
2230 
2231     /* Tracing handlers use ->utask to communicate with fetch methods */
2232     if (!get_utask())
2233         goto out;
2234 
2235     if (arch_uprobe_ignore(&uprobe->arch, regs))
2236         goto out;
2237 
2238     handler_chain(uprobe, regs);
2239 
2240     if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2241         goto out;
2242 
2243     if (!pre_ssout(uprobe, regs, bp_vaddr))
2244         return;
2245 
2246     /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
2247 out:
2248     put_uprobe(uprobe);
2249 }
2250 
2251 /*
2252  * Perform required fix-ups and disable singlestep.
2253  * Allow pending signals to take effect.
2254  */
2255 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2256 {
2257     struct uprobe *uprobe;
2258     int err = 0;
2259 
2260     uprobe = utask->active_uprobe;
2261     if (utask->state == UTASK_SSTEP_ACK)
2262         err = arch_uprobe_post_xol(&uprobe->arch, regs);
2263     else if (utask->state == UTASK_SSTEP_TRAPPED)
2264         arch_uprobe_abort_xol(&uprobe->arch, regs);
2265     else
2266         WARN_ON_ONCE(1);
2267 
2268     put_uprobe(uprobe);
2269     utask->active_uprobe = NULL;
2270     utask->state = UTASK_RUNNING;
2271     xol_free_insn_slot(current);
2272 
2273     spin_lock_irq(&current->sighand->siglock);
2274     recalc_sigpending(); /* see uprobe_deny_signal() */
2275     spin_unlock_irq(&current->sighand->siglock);
2276 
2277     if (unlikely(err)) {
2278         uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2279         force_sig(SIGILL);
2280     }
2281 }
2282 
2283 /*
2284  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
2285  * allows the thread to return from interrupt. After that handle_swbp()
2286  * sets utask->active_uprobe.
2287  *
2288  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
2289  * and allows the thread to return from interrupt.
2290  *
2291  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
2292  * uprobe_notify_resume().
2293  */
2294 void uprobe_notify_resume(struct pt_regs *regs)
2295 {
2296     struct uprobe_task *utask;
2297 
2298     clear_thread_flag(TIF_UPROBE);
2299 
2300     utask = current->utask;
2301     if (utask && utask->active_uprobe)
2302         handle_singlestep(utask, regs);
2303     else
2304         handle_swbp(regs);
2305 }
2306 
2307 /*
2308  * uprobe_pre_sstep_notifier gets called from interrupt context as part of the
2309  * notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
2310  */
2311 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2312 {
2313     if (!current->mm)
2314         return 0;
2315 
2316     if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2317         (!current->utask || !current->utask->return_instances))
2318         return 0;
2319 
2320     set_thread_flag(TIF_UPROBE);
2321     return 1;
2322 }
2323 
2324 /*
2325  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
2326  * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
2327  */
2328 int uprobe_post_sstep_notifier(struct pt_regs *regs)
2329 {
2330     struct uprobe_task *utask = current->utask;
2331 
2332     if (!current->mm || !utask || !utask->active_uprobe)
2333         /* task is currently not uprobed */
2334         return 0;
2335 
2336     utask->state = UTASK_SSTEP_ACK;
2337     set_thread_flag(TIF_UPROBE);
2338     return 1;
2339 }
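
/*
 * Illustrative sketch, not part of this file: roughly how an architecture
 * feeds its traps into the two notifiers above via the die-notifier chain
 * registered in uprobes_init() below.  Loosely modeled on x86, where
 * DIE_INT3 is the breakpoint trap and DIE_DEBUG the single-step trap;
 * other architectures use their own die values.
 */
int arch_uprobe_exception_notify(struct notifier_block *self,
                 unsigned long val, void *data)
{
    struct die_args *args = data;
    struct pt_regs *regs = args->regs;

    /* We are only interested in userspace traps */
    if (regs && !user_mode(regs))
        return NOTIFY_DONE;

    switch (val) {
    case DIE_INT3:
        if (uprobe_pre_sstep_notifier(regs))
            return NOTIFY_STOP;
        break;
    case DIE_DEBUG:
        if (uprobe_post_sstep_notifier(regs))
            return NOTIFY_STOP;
        break;
    }
    return NOTIFY_DONE;
}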
2340 
2341 static struct notifier_block uprobe_exception_nb = {
2342     .notifier_call      = arch_uprobe_exception_notify,
2343     .priority       = INT_MAX-1,    /* notified after kprobes, kgdb */
2344 };
2345 
2346 void __init uprobes_init(void)
2347 {
2348     int i;
2349 
2350     for (i = 0; i < UPROBES_HASH_SZ; i++)
2351         mutex_init(&uprobes_mmap_mutex[i]);
2352 
2353     BUG_ON(register_die_notifier(&uprobe_exception_nb));
2354 }