// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;		/* no instruction breakpoints available */
}

static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}
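
/*
 * Worked example (illustrative, assuming HW_BREAKPOINT_SIZE == 8):
 * bp1 with bp_addr = 0x1006, bp_len = 4 is widened to [0x1000, 0x1010),
 * and bp2 with bp_addr = 0x100c, bp_len = 2 to [0x1008, 0x1010).
 * Since 0x1000 < 0x1010 and 0x1010 > 0x1008, the ranges overlap and
 * the function returns true.
 */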

static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	list_add(&tmp->list, &task_bps);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
}

/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i;

	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
			return true;
	}
	return false;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

/*
 * We don't use any locks to serialize accesses to cpu_bps or task_bps
 * because we are already inside nr_bp_mutex.
 */
int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}
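
/*
 * Example (illustrative): an arch type of
 * HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE maps to the generic
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W, while a type with neither
 * the read nor the write bit set yields -EINVAL.
 */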

/*
 * Watchpoint match range is always doubleword(8 bytes) aligned on
 * powerpc. If the given range is crossing doubleword boundary, we
 * need to increase the length such that next doubleword also get
 * covered. Ex,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *    <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross 512 bytes boundary on p10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}
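
/*
 * Worked examples (illustrative, HW_BREAKPOINT_SIZE == 8):
 * - address = 0x1006, len = 6: start_addr = 0x1000, end_addr = 0x1010,
 *   so hw_len = 16 even though only 6 bytes were requested.
 * - DAWR without CPU_FTR_ARCH_31: address = 0x1f0, len = 0x20 gives
 *   start_addr = 0x1f0 and end_addr = 0x210; the range straddles the
 *   512-byte boundary at 0x200, so the function returns -EINVAL.
 */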

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the task has any
 * breakpoint set.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	return;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

static bool is_octword_vsx_instr(int type, int size)
{
	return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     ppc_inst_t instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs_set_return_msr(regs, regs->msr | MSR_SE);
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}

static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
					     int *hit, unsigned long ea)
{
	int i;
	unsigned long hw_end_addr;

	/*
	 * Handle spurious exception only when any bp_per_reg is set.
	 * Otherwise this might be created by xmon and not actually a
	 * spurious exception.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;

		hw_end_addr = ALIGN(info[i]->address + info[i]->len, HW_BREAKPOINT_SIZE);

		/*
		 * Ending address of DAWR range is less than starting
		 * address of op.
		 */
		if ((hw_end_addr - 1) >= ea)
			continue;

		/*
		 * Those addresses need to be in the same or in two
		 * consecutive 512B blocks;
		 */
		if (((hw_end_addr - 1) >> 10) != (ea >> 10))
			continue;

		/*
		 * 'op address + 64B' generates an address that has a
		 * carry into bit 52 (crosses 2K boundary).
		 */
		if ((ea & 0x800) == ((ea + 64) & 0x800))
			continue;

		break;
	}

	if (i == nr_wp_slots())
		return;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (info[i]) {
			hit[i] = 1;
			info[i]->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}
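
/*
 * Worked example (illustrative): a DAWR watchpoint at 0x700 with len 8
 * gives hw_end_addr = 0x708. An octword VSX access at ea = 0x7c0 then
 * passes all three checks above: 0x707 < 0x7c0; 0x707 >> 10 and
 * 0x7c0 >> 10 both equal 1; and 0x7c0 + 64 = 0x800 flips bit 0x800,
 * i.e. the access crosses a 2K boundary. The hit is then marked
 * HW_BRK_TYPE_EXTRANEOUS_IRQ rather than reported to the user.
 */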

int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = true;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/* Workaround for Power10 DD1 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(info, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * even before returning to userspace.
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
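
/*
 * Illustrative user-space consumer (not part of this file): the slot
 * reservation and exception paths above are exercised whenever a data
 * watchpoint is created via perf_event_open(2). A minimal sketch of a
 * self-monitoring, task-bound event (pid == 0, cpu == -1), which takes
 * the task_bps path in arch_reserve_bp_slot(); the write to 'watched'
 * below raises the DABR/DAWR match handled by hw_breakpoint_handler():
 *
 *	#include <linux/hw_breakpoint.h>
 *	#include <linux/perf_event.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long watched;
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		int fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type = PERF_TYPE_BREAKPOINT;
 *		attr.size = sizeof(attr);
 *		attr.bp_type = HW_BREAKPOINT_W;
 *		attr.bp_addr = (unsigned long)&watched;
 *		attr.bp_len = HW_BREAKPOINT_LEN_8;
 *
 *		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
 *		if (fd < 0)
 *			return 1;
 *		watched = 1;
 *		close(fd);
 *		return 0;
 *	}
 */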