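/*
 * User-space Probes (UProbes) for x86: decode and validate probed
 * instructions, rewrite them for out-of-line (XOL) execution, emulate
 * the common cases, and fix up register state after single-stepping.
 */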
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>
/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define UPROBE_TRAP_NR		UINT_MAX

#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
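/*
 * Good-instruction tables: one bit per one-byte opcode, set if uprobes
 * may probe that opcode.  Each W() row packs 16 such bits, and pairs of
 * rows are OR'ed into one u32 element (the "<< (row % 32)" shifts the
 * second row of each pair up by 16).  The tables are non-const and
 * volatile to keep gcc from statically optimizing them away, since
 * test_bit() makes gcc think only the unsigned-long view is used.
 */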
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/* Good-instruction table for 32-bit apps */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif
#if defined(CONFIG_X86_64)
/* Good-instruction table for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif
/* Good-instruction table for 2-byte (0x0f-prefixed) opcodes, used for both
 * 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W
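/*
 * Returns true if the decoded instruction carries a prefix that uprobes
 * refuses to single-step: the ES/CS/DS/SS segment overrides and LOCK.
 */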
static bool is_prefix_bad(struct insn *insn)
{
	insn_byte_t p;
	int i;

	for_each_insn_prefix(insn, i, p) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(p);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):
		case INAT_MAKE_PREFIX(INAT_PFX_DS):
		case INAT_MAKE_PREFIX(INAT_PFX_SS):
		case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
			return true;
		}
	}
	return false;
}
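/*
 * Decode the instruction copied into auprobe->insn and check that it is
 * something uprobes can handle: decodable, free of bad prefixes, and
 * present in the good-instruction bitmap for the task's mode.
 */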
static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
	u32 volatile *good_insns;
	int ret;

	ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
	if (ret < 0)
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		/* 0x0f-prefixed opcodes are checked against the 2-byte table */
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}
#ifdef CONFIG_X86_64
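/*
 * A RIP-relative instruction cannot simply be copied and executed from
 * the XOL slot: its effective address would be computed relative to the
 * slot, not to the probed address.  Instead, rewrite the copy to address
 * through a scratch register (SI, DI, or BX), which riprel_pre_xol()
 * loads with the value RIP would have had (probed address + insn length)
 * and riprel_post_xol() restores afterwards.  For example (illustrative,
 * not from the original sources):
 *
 *	lea 0x0(%rip), %rax	becomes		lea 0x0(%rsi), %rax
 *
 * with %rsi temporarily holding the original next-instruction address.
 */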
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode a low-numbered reg, not r8+.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}

	/*
	 * Similar treatment for the VEX3/EVEX prefix: byte 1 carries the
	 * inverted R/X/B bits in bits 7-5.
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * Setting bits 6-5 (the inverted X and B bits) encodes
		 * X=0, B=0, so that MODRM.rm selects a low-numbered
		 * register -- same effect as clearing REX.b above.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative
	 * addressing via a scratch register.
	 *
	 * Choosing the scratch register is tricky: some instructions with a
	 * modrm byte also use registers not encoded in the modrm byte, e.g.
	 * [i]div/[i]mul implicitly use dx:ax, shift ops use cx, cmpxchg8b
	 * uses dx:ax and bx:cx, and mulx uses dx.  String instructions
	 * (MOVS/STOS/CMPS) use di without a modrm byte, but no modrm
	 * instruction is known to use SI implicitly.  So prefer SI, then DI,
	 * then BX.
	 */
	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * vex.vvvv field is in bits 6-3, bits are inverted.
	 * But in 32-bit mode, the high-order bit may be ignored.
	 * Therefore, consider only the 3 low-order bits.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 *
	 * Choose scratch reg. Order is important: must not select bx
	 * if we can use si (cmpxchg8b case!)
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from "00 reg 101" to "10 reg reg2". Example:
	 * 89 05 disp32  mov %eax,disp32(%rip) becomes
	 * 89 86 disp32  mov %eax,disp32(%rsi)
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}
static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
}
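/*
 * If we're stepping a rewritten rip-relative instruction, save the
 * contents of the scratch register chosen by riprel_analyze() and load
 * it with the address the instruction's RIP would have referenced.
 */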
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */
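/*
 * Per-instruction-type operations: how to emulate the probed instruction
 * in the kernel without executing it (->emulate), and how to prepare,
 * fix up, or abort an out-of-line single-step (->pre_xol, ->post_xol,
 * ->abort).  Any callback may be NULL.
 */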
struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(struct pt_regs *regs)
{
	/*
	 * Check the saved user mode of the registers: the task may be a
	 * 32-bit process running on a 64-bit kernel.
	 */
	return user_64bit_mode(regs) ? 8 : 4;
}
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long(regs);

	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}
/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register (FIX_RIP_reg).
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(regs);
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}

	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}
static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};
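/*
 * Branch instructions (call/jmp/jcc) are emulated rather than
 * single-stepped out of line.  Conditional-jump opcodes come in
 * taken/not-taken pairs: the even opcode tests the condition, the odd
 * opcode its negation, so COND() expands each pair into two case labels
 * around one flags expression; DO() is defined differently at each use
 * site below.
 */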
static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND
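/*
 * Emulate a relative call or (conditional) jump: advance ->ip past the
 * instruction, then add the branch offset if the branch is taken.  For
 * a call, also push the return address; if that push faults, fall back
 * to executing the instruction out of line.
 */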
static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If the push fails, we execute this (mangled, see the
		 * comment in branch_clear_offset) insn out-of-line. In the
		 * likely case this triggers a trap, and the probed
		 * application either dies or restarts the same insn after
		 * it handles the signal.
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the
	 * return address _and_ another thread expanded our stack before
	 * the mangled "call" insn was executed out-of-line.  Restore ->sp
	 * and restart; the emulation will push the return address next time.
	 */
	regs->sp += sizeof_long(regs);
	return -ERESTART;
}
static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:": zero the branch offset in the
	 * XOL copy, so that if ->emulate() fails and the copy is executed
	 * out-of-line, the call targets the next address and merely pushes
	 * a return address, which branch_post_xol_op() then discards.
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};
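/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */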
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	insn_byte_t p;
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* nop */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		fallthrough;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differs in 64-bit mode, so rather than try
	 * to emulate them, reject any branch insn with a 0x66 prefix.
	 */
	for_each_insn_prefix(insn, i, p) {
		if (p == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
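/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */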
static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn), reg_offset = 0;

	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only support rex_prefix 0x41 (x64 only) */
#ifdef CONFIG_X86_64
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;

		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, r8);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, r9);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, r10);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, r11);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, r12);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, r13);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, r14);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, r15);
			break;
		}
#else
		return -ENOSYS;
#endif
	} else {
		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, ax);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, cx);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, dx);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, bx);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, sp);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, bp);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, si);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, di);
			break;
		}
	}

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;
	auprobe->ops = &push_xol_ops;
	return 0;
}
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:		/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:		/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		fallthrough;
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}
/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}
/*
 * Report whether the XOL instruction itself trapped (SIGILL, SIGSEGV,
 * etc.) instead of completing its single-step.  arch_uprobe_pre_xol()
 * sets thread.trap_nr to UPROBE_TRAP_NR; any fault handler such as
 * do_page_fault/do_trap overwrites it, so a changed value means the
 * stepped instruction trapped.
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}
/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * If the task was single-stepping before the probe hit, forward
	 * the SIGTRAP it would have received for its own step.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}
/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	default:
		break;
	}

	return ret;
}
/*
 * This function gets called when the XOL instruction either gets trapped
 * or the thread has a fatal signal. Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}
static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}
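/*
 * Replace the return address on the user stack with the uretprobe
 * trampoline address.  Returns the original return address on success,
 * or -1 if it could not be read or replaced.
 */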
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(regs), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);
		force_sig(SIGSEGV);
	}

	return -1;
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}