#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

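/* Number of 64-bit words written per one comparison: */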
#define KCOV_WORDS_PER_CMP 4

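/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task a time allowed).
 */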
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of the coverage buffer (in unsigned longs). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in unsigned longs). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled;
	 * kcov_remote_stop() uses it to detect stale remote coverage.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	/* Saved task kcov state while a remote softirq section is running. */
	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

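/* Must be called with kcov_remote_lock locked. */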
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

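/* Must be called with kcov_remote_lock locked. */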
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

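/* Must be called with kcov_remote_lock locked. */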
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

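/* Must be called with kcov_remote_lock locked. */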
static void kcov_remote_area_put(struct kcov_remote_area *area,
					unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of a syscall's
	 * inputs, so we ignore code executed in interrupts, unless we are in
	 * a remote coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

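/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */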
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		/*
		 * Some early interrupt code can bypass check_kcov_mode() and
		 * reenter this function. Update pos before writing the PC so
		 * that such a nested call cannot overwrite this entry.
		 */
		WRITE_ONCE(area[0], pos);
		barrier();
		area[pos] = ip;
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		/* See comment in __sanitizer_cov_trace_pc(). */
		WRITE_ONCE(area[0], count + 1);
		barrier();
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif

static void kcov_start(struct task_struct *t, struct kcov *kcov,
			unsigned int size, void *area, enum kcov_mode mode,
			int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * t->kcov is set, so coverage was enabled for this task at some
	 * point. If the descriptor is no longer owned by this task
	 * (kcov->t != t), warn and leave the descriptor alone.
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	spin_unlock_irqrestore(&kcov->lock, flags);
	vma->vm_flags |= VM_DONTEXPAND;
	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(kcov->area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res) {
			pr_warn_once("kcov: vm_insert_page() failed\n");
			return res;
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long flags, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point user must have already set up trace mode and
		 * mmapped the file. Coverage stays enabled until KCOV_DISABLE
		 * or until the task exits.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long size, flags;
	void *area;

	kcov = filep->private_data;
	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 *
		 * First check the size argument - it must be at least 2
		 * to hold the current position and one PC.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		area = vmalloc_user(size * sizeof(unsigned long));
		if (area == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&kcov->lock, flags);
		if (kcov->mode != KCOV_MODE_DISABLED) {
			spin_unlock_irqrestore(&kcov->lock, flags);
			vfree(area);
			return -EBUSY;
		}
		kcov->area = area;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		spin_unlock_irqrestore(&kcov->lock, flags);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
		fallthrough;
	default:
		/*
		 * All other commands can be executed under a spin lock, so we
		 * obtain and release it here to simplify kcov_ioctl_locked().
		 */
		spin_lock_irqsave(&kcov->lock, flags);
		res = kcov_ioctl_locked(kcov, cmd, arg);
		spin_unlock_irqrestore(&kcov->lock, flags);
		kfree(remote_arg);
		return res;
	}
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

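/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a
 * section of code executed by a kernel background thread or in a softirq, so
 * that the coverage of that section is collected into the buffer of a kcov
 * device that was set up with ioctl(KCOV_REMOTE_ENABLE) for the matching
 * handle.
 *
 * Internally, kcov_remote_start() looks up the handle in kcov_remote_map,
 * grabs a spare area (the preallocated per-cpu area when called from a
 * softirq) and switches the current task to collect coverage into it.
 * kcov_remote_stop() then copies the collected coverage back into the main
 * kcov area with kcov_move_area() and returns the spare area to the pool.
 */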
static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
				data->saved_area, data->saved_mode,
				data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note, that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlocking kcov_remote_lock to prevent races
	 * with KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers, use log of entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

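/* See the comment before kcov_remote_start() for usage details. */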
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

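/* See the comment before kcov_remote_start() for usage details. */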
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);