0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
0004  * using the CPU's debug registers.
0005  *
0006  * Copyright (C) 2012 ARM Limited
0007  * Author: Will Deacon <will.deacon@arm.com>
0008  */
0009 
0010 #define pr_fmt(fmt) "hw-breakpoint: " fmt
0011 
0012 #include <linux/compat.h>
0013 #include <linux/cpu_pm.h>
0014 #include <linux/errno.h>
0015 #include <linux/hw_breakpoint.h>
0016 #include <linux/kprobes.h>
0017 #include <linux/perf_event.h>
0018 #include <linux/ptrace.h>
0019 #include <linux/smp.h>
0020 #include <linux/uaccess.h>
0021 
0022 #include <asm/current.h>
0023 #include <asm/debug-monitors.h>
0024 #include <asm/hw_breakpoint.h>
0025 #include <asm/traps.h>
0026 #include <asm/cputype.h>
0027 #include <asm/system_misc.h>
0028 
0029 /* Breakpoint currently in use for each BRP. */
0030 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
0031 
0032 /* Watchpoint currently in use for each WRP. */
0033 static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
0034 
0035 /* Currently stepping a per-CPU kernel breakpoint. */
0036 static DEFINE_PER_CPU(int, stepping_kernel_bp);
0037 
0038 /* Number of BRP/WRP registers on this CPU. */
0039 static int core_num_brps;
0040 static int core_num_wrps;
0041 
0042 int hw_breakpoint_slots(int type)
0043 {
0044     /*
0045      * We can be called early, so don't rely on
0046      * our static variables being initialised.
0047      */
0048     switch (type) {
0049     case TYPE_INST:
0050         return get_num_brps();
0051     case TYPE_DATA:
0052         return get_num_wrps();
0053     default:
0054         pr_warn("unknown slot type: %d\n", type);
0055         return 0;
0056     }
0057 }
0058 
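The generic hw_breakpoint core queries hw_breakpoint_slots() to learn how many breakpoint and watchpoint slots it may hand out. As a hypothetical illustration (example_report_slots() and its message are not part of this file), a caller could report the per-CPU capacity like this:

static void example_report_slots(void)
{
	/* TYPE_INST and TYPE_DATA come from <linux/hw_breakpoint.h>. */
	pr_info("CPU exposes %d breakpoint and %d watchpoint slots\n",
		hw_breakpoint_slots(TYPE_INST),
		hw_breakpoint_slots(TYPE_DATA));
}
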
0059 #define READ_WB_REG_CASE(OFF, N, REG, VAL)  \
0060     case (OFF + N):             \
0061         AARCH64_DBG_READ(N, REG, VAL);  \
0062         break
0063 
0064 #define WRITE_WB_REG_CASE(OFF, N, REG, VAL) \
0065     case (OFF + N):             \
0066         AARCH64_DBG_WRITE(N, REG, VAL); \
0067         break
0068 
0069 #define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
0070     READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
0071     READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
0072     READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
0073     READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
0074     READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
0075     READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
0076     READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
0077     READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
0078     READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
0079     READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
0080     READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
0081     READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
0082     READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
0083     READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
0084     READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
0085     READ_WB_REG_CASE(OFF, 15, REG, VAL)
0086 
0087 #define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
0088     WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
0089     WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
0090     WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
0091     WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
0092     WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
0093     WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
0094     WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
0095     WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
0096     WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
0097     WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
0098     WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
0099     WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
0100     WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
0101     WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
0102     WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
0103     WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
0104 
0105 static u64 read_wb_reg(int reg, int n)
0106 {
0107     u64 val = 0;
0108 
0109     switch (reg + n) {
0110     GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
0111     GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
0112     GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
0113     GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
0114     default:
0115         pr_warn("attempt to read from unknown breakpoint register %d\n", n);
0116     }
0117 
0118     return val;
0119 }
0120 NOKPROBE_SYMBOL(read_wb_reg);
0121 
0122 static void write_wb_reg(int reg, int n, u64 val)
0123 {
0124     switch (reg + n) {
0125     GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
0126     GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
0127     GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
0128     GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
0129     default:
0130         pr_warn("attempt to write to unknown breakpoint register %d\n", n);
0131     }
0132     isb();
0133 }
0134 NOKPROBE_SYMBOL(write_wb_reg);
0135 
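The switch/case macros above exist because each debug register (DBGBVR<n>_EL1, DBGBCR<n>_EL1, and so on) must be named explicitly at compile time; the register banks are not index-addressable from C. Assuming AARCH64_DBG_READ() wraps a read_sysreg() of the numbered register, as defined in asm/hw_breakpoint.h, one generated case expands roughly to:

	/* Approximate expansion of READ_WB_REG_CASE(AARCH64_DBG_REG_BVR, 2, ...) */
	case (AARCH64_DBG_REG_BVR + 2):
		val = read_sysreg(dbgbvr2_el1);		/* MRS from DBGBVR2_EL1 */
		break;
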
0136 /*
0137  * Convert a breakpoint privilege level to the corresponding exception
0138  * level.
0139  */
0140 static enum dbg_active_el debug_exception_level(int privilege)
0141 {
0142     switch (privilege) {
0143     case AARCH64_BREAKPOINT_EL0:
0144         return DBG_ACTIVE_EL0;
0145     case AARCH64_BREAKPOINT_EL1:
0146         return DBG_ACTIVE_EL1;
0147     default:
0148         pr_warn("invalid breakpoint privilege level %d\n", privilege);
0149         return -EINVAL;
0150     }
0151 }
0152 NOKPROBE_SYMBOL(debug_exception_level);
0153 
0154 enum hw_breakpoint_ops {
0155     HW_BREAKPOINT_INSTALL,
0156     HW_BREAKPOINT_UNINSTALL,
0157     HW_BREAKPOINT_RESTORE
0158 };
0159 
0160 static int is_compat_bp(struct perf_event *bp)
0161 {
0162     struct task_struct *tsk = bp->hw.target;
0163 
0164     /*
0165      * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
0166      * In this case, use the native interface, since we don't have
0167      * the notion of a "compat CPU" and could end up relying on
0168      * deprecated behaviour if we use unaligned watchpoints in
0169      * AArch64 state.
0170      */
0171     return tsk && is_compat_thread(task_thread_info(tsk));
0172 }
0173 
0174 /**
0175  * hw_breakpoint_slot_setup - Find and set up a perf slot according to
0176  *                the requested operation
0177  *
0178  * @slots: pointer to array of slots
0179  * @max_slots: max number of slots
0180  * @bp: perf_event to setup
0181  * @ops: operation to be carried out on the slot
0182  *
0183  * Return:
0184  *  slot index on success
0185  *  -ENOSPC if no slot is available/matches
0186  *  -EINVAL on an invalid ops parameter
0187  */
0188 static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
0189                     struct perf_event *bp,
0190                     enum hw_breakpoint_ops ops)
0191 {
0192     int i;
0193     struct perf_event **slot;
0194 
0195     for (i = 0; i < max_slots; ++i) {
0196         slot = &slots[i];
0197         switch (ops) {
0198         case HW_BREAKPOINT_INSTALL:
0199             if (!*slot) {
0200                 *slot = bp;
0201                 return i;
0202             }
0203             break;
0204         case HW_BREAKPOINT_UNINSTALL:
0205             if (*slot == bp) {
0206                 *slot = NULL;
0207                 return i;
0208             }
0209             break;
0210         case HW_BREAKPOINT_RESTORE:
0211             if (*slot == bp)
0212                 return i;
0213             break;
0214         default:
0215             pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
0216             return -EINVAL;
0217         }
0218     }
0219     return -ENOSPC;
0220 }
0221 
0222 static int hw_breakpoint_control(struct perf_event *bp,
0223                  enum hw_breakpoint_ops ops)
0224 {
0225     struct arch_hw_breakpoint *info = counter_arch_bp(bp);
0226     struct perf_event **slots;
0227     struct debug_info *debug_info = &current->thread.debug;
0228     int i, max_slots, ctrl_reg, val_reg, reg_enable;
0229     enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
0230     u32 ctrl;
0231 
0232     if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
0233         /* Breakpoint */
0234         ctrl_reg = AARCH64_DBG_REG_BCR;
0235         val_reg = AARCH64_DBG_REG_BVR;
0236         slots = this_cpu_ptr(bp_on_reg);
0237         max_slots = core_num_brps;
0238         reg_enable = !debug_info->bps_disabled;
0239     } else {
0240         /* Watchpoint */
0241         ctrl_reg = AARCH64_DBG_REG_WCR;
0242         val_reg = AARCH64_DBG_REG_WVR;
0243         slots = this_cpu_ptr(wp_on_reg);
0244         max_slots = core_num_wrps;
0245         reg_enable = !debug_info->wps_disabled;
0246     }
0247 
0248     i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
0249 
0250     if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
0251         return i;
0252 
0253     switch (ops) {
0254     case HW_BREAKPOINT_INSTALL:
0255         /*
0256          * Ensure debug monitors are enabled at the correct exception
0257          * level.
0258          */
0259         enable_debug_monitors(dbg_el);
0260         fallthrough;
0261     case HW_BREAKPOINT_RESTORE:
0262         /* Setup the address register. */
0263         write_wb_reg(val_reg, i, info->address);
0264 
0265         /* Setup the control register. */
0266         ctrl = encode_ctrl_reg(info->ctrl);
0267         write_wb_reg(ctrl_reg, i,
0268                  reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
0269         break;
0270     case HW_BREAKPOINT_UNINSTALL:
0271         /* Reset the control register. */
0272         write_wb_reg(ctrl_reg, i, 0);
0273 
0274         /*
0275          * Release the debug monitors for the correct exception
0276          * level.
0277          */
0278         disable_debug_monitors(dbg_el);
0279         break;
0280     }
0281 
0282     return 0;
0283 }
0284 
0285 /*
0286  * Install a perf counter breakpoint.
0287  */
0288 int arch_install_hw_breakpoint(struct perf_event *bp)
0289 {
0290     return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
0291 }
0292 
0293 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
0294 {
0295     hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
0296 }
0297 
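arch_install_hw_breakpoint() and arch_uninstall_hw_breakpoint() are normally reached through the generic perf API rather than called directly. A minimal sketch of a kernel-side user, loosely modelled on samples/hw_breakpoint/data_breakpoint.c (the variable names, message and module boilerplate here are illustrative, not taken from this file):

#include <linux/hw_breakpoint.h>
#include <linux/module.h>
#include <linux/perf_event.h>

static unsigned long watched_var;
static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched_var was accessed\n");
	/*
	 * Note: for a custom handler like this, watchpoint_report() below
	 * does not arrange single-stepping over the faulting access, so a
	 * real user must deal with the re-executed access itself.
	 */
}

static int __init wp_example_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&watched_var;
	attr.bp_len  = HW_BREAKPOINT_LEN_8;	/* watch the whole doubleword */
	attr.bp_type = HW_BREAKPOINT_W;

	/* Reaches arch_install_hw_breakpoint() on every online CPU. */
	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	return IS_ERR((void __force *)wp) ? PTR_ERR((void __force *)wp) : 0;
}

static void __exit wp_example_exit(void)
{
	/* Reaches arch_uninstall_hw_breakpoint() on every online CPU. */
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_example_init);
module_exit(wp_example_exit);
MODULE_LICENSE("GPL");
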
0298 static int get_hbp_len(u8 hbp_len)
0299 {
0300     unsigned int len_in_bytes = 0;
0301 
0302     switch (hbp_len) {
0303     case ARM_BREAKPOINT_LEN_1:
0304         len_in_bytes = 1;
0305         break;
0306     case ARM_BREAKPOINT_LEN_2:
0307         len_in_bytes = 2;
0308         break;
0309     case ARM_BREAKPOINT_LEN_3:
0310         len_in_bytes = 3;
0311         break;
0312     case ARM_BREAKPOINT_LEN_4:
0313         len_in_bytes = 4;
0314         break;
0315     case ARM_BREAKPOINT_LEN_5:
0316         len_in_bytes = 5;
0317         break;
0318     case ARM_BREAKPOINT_LEN_6:
0319         len_in_bytes = 6;
0320         break;
0321     case ARM_BREAKPOINT_LEN_7:
0322         len_in_bytes = 7;
0323         break;
0324     case ARM_BREAKPOINT_LEN_8:
0325         len_in_bytes = 8;
0326         break;
0327     }
0328 
0329     return len_in_bytes;
0330 }
0331 
0332 /*
0333  * Check whether the bp virtual address is in kernel space.
0334  */
0335 int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
0336 {
0337     unsigned int len;
0338     unsigned long va;
0339 
0340     va = hw->address;
0341     len = get_hbp_len(hw->ctrl.len);
0342 
0343     return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
0344 }
0345 
0346 /*
0347  * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
0348  * Hopefully this will disappear when ptrace can bypass the conversion
0349  * to generic breakpoint descriptions.
0350  */
0351 int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
0352                int *gen_len, int *gen_type, int *offset)
0353 {
0354     /* Type */
0355     switch (ctrl.type) {
0356     case ARM_BREAKPOINT_EXECUTE:
0357         *gen_type = HW_BREAKPOINT_X;
0358         break;
0359     case ARM_BREAKPOINT_LOAD:
0360         *gen_type = HW_BREAKPOINT_R;
0361         break;
0362     case ARM_BREAKPOINT_STORE:
0363         *gen_type = HW_BREAKPOINT_W;
0364         break;
0365     case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
0366         *gen_type = HW_BREAKPOINT_RW;
0367         break;
0368     default:
0369         return -EINVAL;
0370     }
0371 
0372     if (!ctrl.len)
0373         return -EINVAL;
0374     *offset = __ffs(ctrl.len);
0375 
0376     /* Len */
0377     switch (ctrl.len >> *offset) {
0378     case ARM_BREAKPOINT_LEN_1:
0379         *gen_len = HW_BREAKPOINT_LEN_1;
0380         break;
0381     case ARM_BREAKPOINT_LEN_2:
0382         *gen_len = HW_BREAKPOINT_LEN_2;
0383         break;
0384     case ARM_BREAKPOINT_LEN_3:
0385         *gen_len = HW_BREAKPOINT_LEN_3;
0386         break;
0387     case ARM_BREAKPOINT_LEN_4:
0388         *gen_len = HW_BREAKPOINT_LEN_4;
0389         break;
0390     case ARM_BREAKPOINT_LEN_5:
0391         *gen_len = HW_BREAKPOINT_LEN_5;
0392         break;
0393     case ARM_BREAKPOINT_LEN_6:
0394         *gen_len = HW_BREAKPOINT_LEN_6;
0395         break;
0396     case ARM_BREAKPOINT_LEN_7:
0397         *gen_len = HW_BREAKPOINT_LEN_7;
0398         break;
0399     case ARM_BREAKPOINT_LEN_8:
0400         *gen_len = HW_BREAKPOINT_LEN_8;
0401         break;
0402     default:
0403         return -EINVAL;
0404     }
0405 
0406     return 0;
0407 }
0408 
0409 /*
0410  * Construct an arch_hw_breakpoint from a perf_event.
0411  */
0412 static int arch_build_bp_info(struct perf_event *bp,
0413                   const struct perf_event_attr *attr,
0414                   struct arch_hw_breakpoint *hw)
0415 {
0416     /* Type */
0417     switch (attr->bp_type) {
0418     case HW_BREAKPOINT_X:
0419         hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
0420         break;
0421     case HW_BREAKPOINT_R:
0422         hw->ctrl.type = ARM_BREAKPOINT_LOAD;
0423         break;
0424     case HW_BREAKPOINT_W:
0425         hw->ctrl.type = ARM_BREAKPOINT_STORE;
0426         break;
0427     case HW_BREAKPOINT_RW:
0428         hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
0429         break;
0430     default:
0431         return -EINVAL;
0432     }
0433 
0434     /* Len */
0435     switch (attr->bp_len) {
0436     case HW_BREAKPOINT_LEN_1:
0437         hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
0438         break;
0439     case HW_BREAKPOINT_LEN_2:
0440         hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
0441         break;
0442     case HW_BREAKPOINT_LEN_3:
0443         hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
0444         break;
0445     case HW_BREAKPOINT_LEN_4:
0446         hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
0447         break;
0448     case HW_BREAKPOINT_LEN_5:
0449         hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
0450         break;
0451     case HW_BREAKPOINT_LEN_6:
0452         hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
0453         break;
0454     case HW_BREAKPOINT_LEN_7:
0455         hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
0456         break;
0457     case HW_BREAKPOINT_LEN_8:
0458         hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
0459         break;
0460     default:
0461         return -EINVAL;
0462     }
0463 
0464     /*
0465      * On AArch64, we only permit breakpoints of length 4, whereas
0466      * AArch32 also requires breakpoints of length 2 for Thumb.
0467      * Watchpoints can be of length 1, 2, 4 or 8 bytes.
0468      */
0469     if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
0470         if (is_compat_bp(bp)) {
0471             if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
0472                 hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
0473                 return -EINVAL;
0474         } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
0475             /*
0476              * FIXME: Some tools (I'm looking at you perf) assume
0477              *    that breakpoints should be sizeof(long). This
0478              *    is nonsense. For now, we fix up the parameter
0479              *    but we should probably return -EINVAL instead.
0480              */
0481             hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
0482         }
0483     }
0484 
0485     /* Address */
0486     hw->address = attr->bp_addr;
0487 
0488     /*
0489      * Privilege
0490      * Note that we disallow combined EL0/EL1 breakpoints because
0491      * that would complicate the stepping code.
0492      */
0493     if (arch_check_bp_in_kernelspace(hw))
0494         hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
0495     else
0496         hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
0497 
0498     /* Enabled? */
0499     hw->ctrl.enabled = !attr->disabled;
0500 
0501     return 0;
0502 }
0503 
0504 /*
0505  * Validate the arch-specific HW Breakpoint register settings.
0506  */
0507 int hw_breakpoint_arch_parse(struct perf_event *bp,
0508                  const struct perf_event_attr *attr,
0509                  struct arch_hw_breakpoint *hw)
0510 {
0511     int ret;
0512     u64 alignment_mask, offset;
0513 
0514     /* Build the arch_hw_breakpoint. */
0515     ret = arch_build_bp_info(bp, attr, hw);
0516     if (ret)
0517         return ret;
0518 
0519     /*
0520      * Check address alignment.
0521      * We don't do any clever alignment correction for watchpoints
0522      * because using 64-bit unaligned addresses is deprecated for
0523      * AArch64.
0524      *
0525      * AArch32 tasks expect some simple alignment fixups, so emulate
0526      * that here.
0527      */
0528     if (is_compat_bp(bp)) {
0529         if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
0530             alignment_mask = 0x7;
0531         else
0532             alignment_mask = 0x3;
0533         offset = hw->address & alignment_mask;
0534         switch (offset) {
0535         case 0:
0536             /* Aligned */
0537             break;
0538         case 1:
0539         case 2:
0540             /* Allow halfword watchpoints and breakpoints. */
0541             if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
0542                 break;
0543 
0544             fallthrough;
0545         case 3:
0546             /* Allow single byte watchpoint. */
0547             if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
0548                 break;
0549 
0550             fallthrough;
0551         default:
0552             return -EINVAL;
0553         }
0554     } else {
0555         if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
0556             alignment_mask = 0x3;
0557         else
0558             alignment_mask = 0x7;
0559         offset = hw->address & alignment_mask;
0560     }
0561 
0562     hw->address &= ~alignment_mask;
0563     hw->ctrl.len <<= offset;
0564 
0565     /*
0566      * Disallow per-task kernel breakpoints since these would
0567      * complicate the stepping code.
0568      */
0569     if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
0570         return -EINVAL;
0571 
0572     return 0;
0573 }
0574 
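The byte-address-select arithmetic at the end of hw_breakpoint_arch_parse() (encode) and its inverse in arch_bp_generic_fields() (decode) are easiest to follow with concrete numbers. Below is a standalone, userspace-style sketch using plain integers instead of the kernel's ctrl structures, illustrative only; __builtin_ctz()/__builtin_popcount() stand in for __ffs() and a population count:

#include <stdio.h>

int main(void)
{
	/* A 2-byte compat watchpoint at 0x1006; ARM_BREAKPOINT_LEN_2 is
	 * the two-bit byte-address-select mask 0b0011. */
	unsigned long addr = 0x1006, alignment_mask = 0x3;
	unsigned int len = 0x3, offset;

	/* Encode, as in hw_breakpoint_arch_parse(): align the address and
	 * shift the mask up to cover the requested bytes. */
	offset = addr & alignment_mask;		/* 2 */
	addr &= ~alignment_mask;		/* 0x1004 */
	len <<= offset;				/* 0b1100 */
	printf("programmed: addr=%#lx BAS=%#x\n", addr, len);

	/* Decode, as in arch_bp_generic_fields(): recover the offset
	 * (lowest set bit) and the byte count (number of set bits). */
	printf("decoded: offset=%u bytes=%u\n",
	       __builtin_ctz(len), __builtin_popcount(len));
	return 0;
}
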
0575 /*
0576  * Enable/disable all of the breakpoints active at the specified
0577  * exception level at the register level.
0578  * This is used when single-stepping after a breakpoint exception.
0579  */
0580 static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
0581 {
0582     int i, max_slots, privilege;
0583     u32 ctrl;
0584     struct perf_event **slots;
0585 
0586     switch (reg) {
0587     case AARCH64_DBG_REG_BCR:
0588         slots = this_cpu_ptr(bp_on_reg);
0589         max_slots = core_num_brps;
0590         break;
0591     case AARCH64_DBG_REG_WCR:
0592         slots = this_cpu_ptr(wp_on_reg);
0593         max_slots = core_num_wrps;
0594         break;
0595     default:
0596         return;
0597     }
0598 
0599     for (i = 0; i < max_slots; ++i) {
0600         if (!slots[i])
0601             continue;
0602 
0603         privilege = counter_arch_bp(slots[i])->ctrl.privilege;
0604         if (debug_exception_level(privilege) != el)
0605             continue;
0606 
0607         ctrl = read_wb_reg(reg, i);
0608         if (enable)
0609             ctrl |= 0x1;
0610         else
0611             ctrl &= ~0x1;
0612         write_wb_reg(reg, i, ctrl);
0613     }
0614 }
0615 NOKPROBE_SYMBOL(toggle_bp_registers);
0616 
0617 /*
0618  * Debug exception handlers.
0619  */
0620 static int breakpoint_handler(unsigned long unused, unsigned long esr,
0621                   struct pt_regs *regs)
0622 {
0623     int i, step = 0, *kernel_step;
0624     u32 ctrl_reg;
0625     u64 addr, val;
0626     struct perf_event *bp, **slots;
0627     struct debug_info *debug_info;
0628     struct arch_hw_breakpoint_ctrl ctrl;
0629 
0630     slots = this_cpu_ptr(bp_on_reg);
0631     addr = instruction_pointer(regs);
0632     debug_info = &current->thread.debug;
0633 
0634     for (i = 0; i < core_num_brps; ++i) {
0635         rcu_read_lock();
0636 
0637         bp = slots[i];
0638 
0639         if (bp == NULL)
0640             goto unlock;
0641 
0642         /* Check if the breakpoint value matches. */
0643         val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
0644         if (val != (addr & ~0x3))
0645             goto unlock;
0646 
0647         /* Possible match, check the byte address select to confirm. */
0648         ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
0649         decode_ctrl_reg(ctrl_reg, &ctrl);
0650         if (!((1 << (addr & 0x3)) & ctrl.len))
0651             goto unlock;
0652 
0653         counter_arch_bp(bp)->trigger = addr;
0654         perf_bp_event(bp, regs);
0655 
0656         /* Do we need to handle the stepping? */
0657         if (is_default_overflow_handler(bp))
0658             step = 1;
0659 unlock:
0660         rcu_read_unlock();
0661     }
0662 
0663     if (!step)
0664         return 0;
0665 
0666     if (user_mode(regs)) {
0667         debug_info->bps_disabled = 1;
0668         toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);
0669 
0670         /* If we're already stepping a watchpoint, just return. */
0671         if (debug_info->wps_disabled)
0672             return 0;
0673 
0674         if (test_thread_flag(TIF_SINGLESTEP))
0675             debug_info->suspended_step = 1;
0676         else
0677             user_enable_single_step(current);
0678     } else {
0679         toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
0680         kernel_step = this_cpu_ptr(&stepping_kernel_bp);
0681 
0682         if (*kernel_step != ARM_KERNEL_STEP_NONE)
0683             return 0;
0684 
0685         if (kernel_active_single_step()) {
0686             *kernel_step = ARM_KERNEL_STEP_SUSPEND;
0687         } else {
0688             *kernel_step = ARM_KERNEL_STEP_ACTIVE;
0689             kernel_enable_single_step(regs);
0690         }
0691     }
0692 
0693     return 0;
0694 }
0695 NOKPROBE_SYMBOL(breakpoint_handler);
0696 
0697 /*
0698  * Arm64 hardware does not always report a watchpoint hit address that matches
0699  * one of the watchpoints set. It can also report an address "near" the
0700  * watchpoint if a single instruction accesses both watched and unwatched
0701  * addresses. There is no straightforward way, short of disassembling the
0702  * offending instruction, to map that address back to the watchpoint. This
0703  * function computes the distance of the memory access from the watchpoint as a
0704  * heuristic for the likelihood that a given access triggered the watchpoint.
0705  *
0706  * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
0707  * exception" of the ARMv8 Architecture Reference Manual for details.
0708  *
0709  * The function returns the distance of the address from the bytes watched by
0710  * the watchpoint. In case of an exact match, it returns 0.
0711  */
0712 static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
0713                     struct arch_hw_breakpoint_ctrl *ctrl)
0714 {
0715     u64 wp_low, wp_high;
0716     u32 lens, lene;
0717 
0718     addr = untagged_addr(addr);
0719 
0720     lens = __ffs(ctrl->len);
0721     lene = __fls(ctrl->len);
0722 
0723     wp_low = val + lens;
0724     wp_high = val + lene;
0725     if (addr < wp_low)
0726         return wp_low - addr;
0727     else if (addr > wp_high)
0728         return addr - wp_high;
0729     else
0730         return 0;
0731 }
0732 
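A worked example with illustrative numbers: a watchpoint programmed with WVR = 0x2000 and a byte-address-select mask of 0xf0 watches bytes 0x2004..0x2007, so lens = 4 and lene = 7, giving:

/*
 * wp_low  = 0x2000 + 4 = 0x2004
 * wp_high = 0x2000 + 7 = 0x2007
 *
 * addr = 0x2002  ->  distance 2  (wp_low - addr)
 * addr = 0x2006  ->  distance 0  (within the watched bytes)
 * addr = 0x2009  ->  distance 2  (addr - wp_high)
 */
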
0733 static int watchpoint_report(struct perf_event *wp, unsigned long addr,
0734                  struct pt_regs *regs)
0735 {
0736     int step = is_default_overflow_handler(wp);
0737     struct arch_hw_breakpoint *info = counter_arch_bp(wp);
0738 
0739     info->trigger = addr;
0740 
0741     /*
0742      * If we triggered a user watchpoint from a uaccess routine, then
0743      * handle the stepping ourselves since userspace really can't help
0744      * us with this.
0745      */
0746     if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
0747         step = 1;
0748     else
0749         perf_bp_event(wp, regs);
0750 
0751     return step;
0752 }
0753 
0754 static int watchpoint_handler(unsigned long addr, unsigned long esr,
0755                   struct pt_regs *regs)
0756 {
0757     int i, step = 0, *kernel_step, access, closest_match = 0;
0758     u64 min_dist = -1, dist;
0759     u32 ctrl_reg;
0760     u64 val;
0761     struct perf_event *wp, **slots;
0762     struct debug_info *debug_info;
0763     struct arch_hw_breakpoint_ctrl ctrl;
0764 
0765     slots = this_cpu_ptr(wp_on_reg);
0766     debug_info = &current->thread.debug;
0767 
0768     /*
0769      * Find all watchpoints that match the reported address. If no exact
0770      * match is found, attribute the hit to the closest watchpoint.
0771      */
0772     rcu_read_lock();
0773     for (i = 0; i < core_num_wrps; ++i) {
0774         wp = slots[i];
0775         if (wp == NULL)
0776             continue;
0777 
0778         /*
0779          * Check that the access type matches.
0780          * 0 => load, otherwise => store
0781          */
0782         access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
0783              HW_BREAKPOINT_R;
0784         if (!(access & hw_breakpoint_type(wp)))
0785             continue;
0786 
0787         /* Check if the watchpoint value and byte select match. */
0788         val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
0789         ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
0790         decode_ctrl_reg(ctrl_reg, &ctrl);
0791         dist = get_distance_from_watchpoint(addr, val, &ctrl);
0792         if (dist < min_dist) {
0793             min_dist = dist;
0794             closest_match = i;
0795         }
0796         /* Is this an exact match? */
0797         if (dist != 0)
0798             continue;
0799 
0800         step = watchpoint_report(wp, addr, regs);
0801     }
0802 
0803     /* No exact match found? */
0804     if (min_dist > 0 && min_dist != -1)
0805         step = watchpoint_report(slots[closest_match], addr, regs);
0806 
0807     rcu_read_unlock();
0808 
0809     if (!step)
0810         return 0;
0811 
0812     /*
0813      * We always disable EL0 watchpoints because the kernel can
0814      * cause these to fire via an unprivileged access.
0815      */
0816     toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);
0817 
0818     if (user_mode(regs)) {
0819         debug_info->wps_disabled = 1;
0820 
0821         /* If we're already stepping a breakpoint, just return. */
0822         if (debug_info->bps_disabled)
0823             return 0;
0824 
0825         if (test_thread_flag(TIF_SINGLESTEP))
0826             debug_info->suspended_step = 1;
0827         else
0828             user_enable_single_step(current);
0829     } else {
0830         toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
0831         kernel_step = this_cpu_ptr(&stepping_kernel_bp);
0832 
0833         if (*kernel_step != ARM_KERNEL_STEP_NONE)
0834             return 0;
0835 
0836         if (kernel_active_single_step()) {
0837             *kernel_step = ARM_KERNEL_STEP_SUSPEND;
0838         } else {
0839             *kernel_step = ARM_KERNEL_STEP_ACTIVE;
0840             kernel_enable_single_step(regs);
0841         }
0842     }
0843 
0844     return 0;
0845 }
0846 NOKPROBE_SYMBOL(watchpoint_handler);
0847 
0848 /*
0849  * Handle single-step exception.
0850  */
0851 int reinstall_suspended_bps(struct pt_regs *regs)
0852 {
0853     struct debug_info *debug_info = &current->thread.debug;
0854     int handled_exception = 0, *kernel_step;
0855 
0856     kernel_step = this_cpu_ptr(&stepping_kernel_bp);
0857 
0858     /*
0859      * Called from single-step exception handler.
0860      * Return 0 if execution can resume, 1 if a SIGTRAP should be
0861      * reported.
0862      */
0863     if (user_mode(regs)) {
0864         if (debug_info->bps_disabled) {
0865             debug_info->bps_disabled = 0;
0866             toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
0867             handled_exception = 1;
0868         }
0869 
0870         if (debug_info->wps_disabled) {
0871             debug_info->wps_disabled = 0;
0872             toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
0873             handled_exception = 1;
0874         }
0875 
0876         if (handled_exception) {
0877             if (debug_info->suspended_step) {
0878                 debug_info->suspended_step = 0;
0879                 /* Allow exception handling to fall-through. */
0880                 handled_exception = 0;
0881             } else {
0882                 user_disable_single_step(current);
0883             }
0884         }
0885     } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
0886         toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
0887         toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);
0888 
0889         if (!debug_info->wps_disabled)
0890             toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
0891 
0892         if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
0893             kernel_disable_single_step();
0894             handled_exception = 1;
0895         } else {
0896             handled_exception = 0;
0897         }
0898 
0899         *kernel_step = ARM_KERNEL_STEP_NONE;
0900     }
0901 
0902     return !handled_exception;
0903 }
0904 NOKPROBE_SYMBOL(reinstall_suspended_bps);
0905 
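Read together with the two exception handlers above, the kernel-side stepping sequence implemented in this file is, in summary:

/*
 * 1. breakpoint_handler()/watchpoint_handler() fires at EL1, reports the
 *    event, disables the matching EL1 registers via toggle_bp_registers()
 *    and arms single-step with kernel_enable_single_step(), recording the
 *    state in stepping_kernel_bp.
 * 2. The offending instruction executes and the subsequent single-step
 *    exception is taken.
 * 3. reinstall_suspended_bps() re-enables the EL1 (and, where appropriate,
 *    EL0) registers, clears stepping_kernel_bp and disables single-step,
 *    so execution resumes with the breakpoint re-armed.
 */
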
0906 /*
0907  * Context-switcher for restoring suspended breakpoints.
0908  */
0909 void hw_breakpoint_thread_switch(struct task_struct *next)
0910 {
0911     /*
0912      *           current        next
0913      * disabled: 0              0     => The usual case, NOTIFY_DONE
0914      *           0              1     => Disable the registers
0915      *           1              0     => Enable the registers
0916      *           1              1     => NOTIFY_DONE. per-task bps will
0917      *                                   get taken care of by perf.
0918      */
0919 
0920     struct debug_info *current_debug_info, *next_debug_info;
0921 
0922     current_debug_info = &current->thread.debug;
0923     next_debug_info = &next->thread.debug;
0924 
0925     /* Update breakpoints. */
0926     if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
0927         toggle_bp_registers(AARCH64_DBG_REG_BCR,
0928                     DBG_ACTIVE_EL0,
0929                     !next_debug_info->bps_disabled);
0930 
0931     /* Update watchpoints. */
0932     if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
0933         toggle_bp_registers(AARCH64_DBG_REG_WCR,
0934                     DBG_ACTIVE_EL0,
0935                     !next_debug_info->wps_disabled);
0936 }
0937 
0938 /*
0939  * CPU initialisation.
0940  */
0941 static int hw_breakpoint_reset(unsigned int cpu)
0942 {
0943     int i;
0944     struct perf_event **slots;
0945     /*
0946      * When a CPU goes through cold-boot, it does not have any installed
0947      * slots, so it is safe to share the same function for restoring and
0948      * resetting breakpoints; when a CPU is hotplugged in, it walks the
0949      * slots, which are all empty, and hence just resets the control and
0950      * value registers of the debug hardware.
0951      * When this function is triggered on warm-boot through a CPU PM
0952      * notifier, some slots might be initialised; if so, they are
0953      * reprogrammed according to the slot contents.
0954      */
0955     for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
0956         if (slots[i]) {
0957             hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
0958         } else {
0959             write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
0960             write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
0961         }
0962     }
0963 
0964     for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
0965         if (slots[i]) {
0966             hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
0967         } else {
0968             write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
0969             write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
0970         }
0971     }
0972 
0973     return 0;
0974 }
0975 
0976 #ifdef CONFIG_CPU_PM
0977 extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
0978 #else
0979 static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
0980 {
0981 }
0982 #endif
0983 
0984 /*
0985  * One-time initialisation.
0986  */
0987 static int __init arch_hw_breakpoint_init(void)
0988 {
0989     int ret;
0990 
0991     core_num_brps = get_num_brps();
0992     core_num_wrps = get_num_wrps();
0993 
0994     pr_info("found %d breakpoint and %d watchpoint registers.\n",
0995         core_num_brps, core_num_wrps);
0996 
0997     /* Register debug fault handlers. */
0998     hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
0999                   TRAP_HWBKPT, "hw-breakpoint handler");
1000     hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
1001                   TRAP_HWBKPT, "hw-watchpoint handler");
1002 
1003     /*
1004      * Reset the breakpoint resources. We assume that a halting
1005      * debugger will leave the world in a nice state for us.
1006      */
1007     ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
1008               "perf/arm64/hw_breakpoint:starting",
1009               hw_breakpoint_reset, NULL);
1010     if (ret)
1011         pr_err("failed to register CPU hotplug notifier: %d\n", ret);
1012 
1013     /* Register cpu_suspend hw breakpoint restore hook */
1014     cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
1015 
1016     return ret;
1017 }
1018 arch_initcall(arch_hw_breakpoint_init);
1019 
1020 void hw_breakpoint_pmu_read(struct perf_event *bp)
1021 {
1022 }
1023 
1024 /*
1025  * Dummy function to register with die_notifier.
1026  */
1027 int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
1028                     unsigned long val, void *data)
1029 {
1030     return NOTIFY_DONE;
1031 }