#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed)
 * An example user-space sequence is sketched below the struct.
 */
struct kcov {
    /*
     * Reference counter. We keep one for:
     *  - opened file descriptor
     *  - task with enabled coverage (we can't unwire it from another task)
     */
    atomic_t        refcount;
    /* The lock protects mode, size, area and t. */
    spinlock_t      lock;
    enum kcov_mode      mode;
    /* Size of arena (in longs for KCOV_MODE_TRACE). */
    unsigned        size;
    /* Coverage buffer shared with user space. */
    void            *area;
    /* Task for which we collect coverage, or NULL. */
    struct task_struct  *t;
};
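
/*
 * Example usage from user space, matching the state transitions described
 * above. This is an illustrative sketch only (not part of this file): it
 * assumes the KCOV_* ioctl definitions from the uapi <linux/kcov.h> and a
 * debugfs mount at /sys/kernel/debug.
 *
 *    #define COVER_SIZE (64 << 10)    // number of unsigned longs, not bytes
 *
 *    int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *    // A single KCOV_INIT_TRACE call sets the buffer size (in unsigned longs).
 *    ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *    // mmap() length must equal size * sizeof(unsigned long), offset 0.
 *    unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *                                PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *    // Attach coverage collection to the current task.
 *    ioctl(fd, KCOV_ENABLE, 0);
 */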

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
    struct task_struct *t;
    enum kcov_mode mode;

    t = current;
    /*
     * We are interested in code coverage as a function of syscall inputs,
     * so we ignore code executed in interrupts.
     * The checks for whether we are in an interrupt are open-coded, because
     * 1. We can't use in_interrupt() here, since it also returns true
     *    when we are inside a local_bh_disable() section.
     * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
     *    since that leads to slower generated code (three separate tests,
     *    one for each of the flags).
     */
    if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
                            | NMI_MASK)))
        return;
    mode = READ_ONCE(t->kcov_mode);
    if (mode == KCOV_MODE_TRACE) {
        unsigned long *area;
        unsigned long pos;
        unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
        ip -= kaslr_offset();
#endif

        /*
         * There is some code that runs in interrupts but for which
         * in_interrupt() returns false (e.g. preempt_schedule_irq()).
         * READ_ONCE()/barrier() effectively provides load-acquire with
         * respect to interrupts; the paired barrier()/WRITE_ONCE() is in
         * kcov_ioctl_locked().
         */
        barrier();
        area = t->kcov_area;
        /* The first word is the number of subsequent PCs. */
        pos = READ_ONCE(area[0]) + 1;
        if (likely(pos < t->kcov_size)) {
            area[pos] = ip;
            WRITE_ONCE(area[0], pos);
        }
    }
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
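
/*
 * Example of consuming the buffer layout written above from user space
 * (continues the sketch after struct kcov; illustrative only, with the same
 * assumptions about the uapi KCOV_* definitions):
 *
 *    // Reset the PC counter before exercising the syscall of interest.
 *    __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *    read(-1, NULL, 0);                         // the traced syscall
 *    unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *    for (unsigned long i = 0; i < n; i++)
 *            printf("0x%lx\n", cover[i + 1]);   // PCs are stored from word 1
 *    ioctl(fd, KCOV_DISABLE, 0);
 */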

static void kcov_get(struct kcov *kcov)
{
    atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
    if (atomic_dec_and_test(&kcov->refcount)) {
        vfree(kcov->area);
        kfree(kcov);
    }
}

void kcov_task_init(struct task_struct *t)
{
    t->kcov_mode = KCOV_MODE_DISABLED;
    t->kcov_size = 0;
    t->kcov_area = NULL;
    t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
    struct kcov *kcov;

    kcov = t->kcov;
    if (kcov == NULL)
        return;
    spin_lock(&kcov->lock);
    if (WARN_ON(kcov->t != t)) {
        spin_unlock(&kcov->lock);
        return;
    }
    /* Just so that we do not leave dangling references behind. */
    kcov_task_init(t);
    kcov->t = NULL;
    spin_unlock(&kcov->lock);
    kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
    int res = 0;
    void *area;
    struct kcov *kcov = vma->vm_file->private_data;
    unsigned long size, off;
    struct page *page;

    /* Allocate the buffer up front, before taking the lock. */
    area = vmalloc_user(vma->vm_end - vma->vm_start);
    if (!area)
        return -ENOMEM;

    spin_lock(&kcov->lock);
    size = kcov->size * sizeof(unsigned long);
    if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
        vma->vm_end - vma->vm_start != size) {
        res = -EINVAL;
        goto exit;
    }
    /* Install the buffer on the first mmap() and map its pages. */
    if (!kcov->area) {
        kcov->area = area;
        vma->vm_flags |= VM_DONTEXPAND;
        spin_unlock(&kcov->lock);
        for (off = 0; off < size; off += PAGE_SIZE) {
            page = vmalloc_to_page(kcov->area + off);
            if (vm_insert_page(vma, vma->vm_start + off, page))
                WARN_ONCE(1, "vm_insert_page() failed");
        }
        return 0;
    }
exit:
    spin_unlock(&kcov->lock);
    vfree(area);
    return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
    struct kcov *kcov;

    kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
    if (!kcov)
        return -ENOMEM;
    atomic_set(&kcov->refcount, 1);
    spin_lock_init(&kcov->lock);
    filep->private_data = kcov;
    return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
    kcov_put(filep->private_data);
    return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                 unsigned long arg)
{
    struct task_struct *t;
    unsigned long size, unused;

    switch (cmd) {
    case KCOV_INIT_TRACE:
        /*
         * Enable kcov in trace mode and set up the buffer size.
         * Must happen before anything else.
         */
        if (kcov->mode != KCOV_MODE_DISABLED)
            return -EBUSY;
        /*
         * Size must be at least 2 to hold the current position and one PC.
         * Later we allocate size * sizeof(unsigned long) memory,
         * which must not overflow.
         */
        size = arg;
        if (size < 2 || size > INT_MAX / sizeof(unsigned long))
            return -EINVAL;
        kcov->size = size;
        kcov->mode = KCOV_MODE_TRACE;
        return 0;
    case KCOV_ENABLE:
        /*
         * Enable coverage for the current task.
         * At this point the user must have enabled trace mode and
         * mmapped the file. Coverage collection is disabled only at
         * task exit or voluntarily via KCOV_DISABLE. After that it can
         * be enabled for another task.
         */
        unused = arg;
        if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
            kcov->area == NULL)
            return -EINVAL;
        if (kcov->t != NULL)
            return -EBUSY;
        t = current;
        /* Cache in task struct for performance. */
        t->kcov_size = kcov->size;
        t->kcov_area = kcov->area;
        /* See comment in __sanitizer_cov_trace_pc(). */
        barrier();
        WRITE_ONCE(t->kcov_mode, kcov->mode);
        t->kcov = kcov;
        kcov->t = t;
        /* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
        kcov_get(kcov);
        return 0;
    case KCOV_DISABLE:
        /* Disable coverage for the current task. */
        unused = arg;
        if (unused != 0 || current->kcov != kcov)
            return -EINVAL;
        t = current;
        if (WARN_ON(kcov->t != t))
            return -EINVAL;
        kcov_task_init(t);
        kcov->t = NULL;
        kcov_put(kcov);
        return 0;
    default:
        return -ENOTTY;
    }
}
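
/*
 * For reference, the command values handled above come from the uapi header
 * (include/uapi/linux/kcov.h); at the time of this code they are defined
 * roughly as follows (shown here only as an illustration):
 *
 *    #define KCOV_INIT_TRACE    _IOR('c', 1, unsigned long)
 *    #define KCOV_ENABLE        _IO('c', 100)
 *    #define KCOV_DISABLE       _IO('c', 101)
 */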

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
    struct kcov *kcov;
    int res;

    kcov = filep->private_data;
    spin_lock(&kcov->lock);
    res = kcov_ioctl_locked(kcov, cmd, arg);
    spin_unlock(&kcov->lock);
    return res;
}

static const struct file_operations kcov_fops = {
    .open       = kcov_open,
    .unlocked_ioctl = kcov_ioctl,
    .mmap       = kcov_mmap,
    .release    = kcov_close,
};

static int __init kcov_init(void)
{
    /*
     * The kcov debugfs file won't ever get removed and thus,
     * there is no need to protect it against removal races. The
     * use of debugfs_create_file_unsafe() is actually safe here.
     */
    if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
        pr_err("failed to create kcov in debugfs\n");
        return -ENOMEM;
    }
    return 0;
}

device_initcall(kcov_init);