// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-prologue.c
 *
 * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <bpf/libbpf.h>
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-finder.h"
#include <errno.h>
#include <stdlib.h>
#include <dwarf-regs.h>
#include <linux/filter.h>

#define BPF_REG_SIZE		8

#define JMP_TO_ERROR_CODE	-1
#define JMP_TO_SUCCESS_CODE	-2
#define JMP_TO_USER_CODE	-3

struct bpf_insn_pos {
	struct bpf_insn *begin;
	struct bpf_insn *end;
	struct bpf_insn *pos;
};

static inline int
pos_get_cnt(struct bpf_insn_pos *pos)
{
	return pos->pos - pos->begin;
}

static int
append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
{
	if (!pos->pos)
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	if (pos->pos + 1 >= pos->end) {
		pr_err("bpf prologue: prologue too long\n");
		pos->pos = NULL;
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
	}

	*(pos->pos)++ = new_insn;
	return 0;
}

static int
check_pos(struct bpf_insn_pos *pos)
{
	if (!pos->pos || pos->pos >= pos->end)
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
	return 0;
}

/*
 * Convert an argument type string like "u8", "s16", "u32" or "u64"
 * (see Documentation/trace/kprobetrace.rst) to the size field of a
 * BPF_LDX_MEM instruction (BPF_B, BPF_H, BPF_W or BPF_DW).
 */
static int
argtype_to_ldx_size(const char *type)
{
	int arg_size = type ? atoi(&type[1]) : 64;

	switch (arg_size) {
	case 8:
		return BPF_B;
	case 16:
		return BPF_H;
	case 32:
		return BPF_W;
	case 64:
	default:
		return BPF_DW;
	}
}

static const char *
insn_sz_to_str(int insn_sz)
{
	switch (insn_sz) {
	case BPF_B:
		return "BPF_B";
	case BPF_H:
		return "BPF_H";
	case BPF_W:
		return "BPF_W";
	case BPF_DW:
		return "BPF_DW";
	default:
		return "UNKNOWN";
	}
}

/* Give it a shorter name */
#define ins(i, p) append_insn((i), (p))

/*
 * Given a DWARF register name in 'reg', generate an instruction that
 * loads that register's value from the pt_regs structure pointed to
 * by ctx_reg into target_reg. regs_query_register_offset() translates
 * the register name into its byte offset within struct pt_regs.
 */
static int
gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
		     const char *reg, int target_reg)
{
	int offset = regs_query_register_offset(reg);

	if (offset < 0) {
		pr_err("bpf: prologue: failed to get register %s\n",
		       reg);
		return offset;
	}
	ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);

	return check_pos(pos);
}

/*
 * Generate a probe_read helper call.
 *
 * src_base_addr_reg is a register holding the base address,
 * dst_addr_reg is a register holding the destination address
 * (a slot on the BPF stack). The result is:
 *
 *  *[dst_addr_reg] = *([src_base_addr_reg] + offset)
 *
 * Arguments of the probe_read helper:
 *      ARG1: ptr to stack (dest)
 *      ARG2: size (BPF_REG_SIZE)
 *      ARG3: unsafe ptr (src)
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset,
	     int probeid)
{
	/* mov arg3, src_base_addr_reg */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
	/* add arg3, #offset */
	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* mov arg2, #reg_size */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* mov arg1, dst_addr_reg */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* Call probe_read */
	ins(BPF_EMIT_CALL(probeid), pos);
	/*
	 * Error processing: if the read fails, jump to the error
	 * code. The jump offset is a placeholder which
	 * prologue_relocate() later points at the start of the
	 * error handling code.
	 */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
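
/*
 * Illustrative sketch (not from the original source): a call like
 * gen_read_mem(pos, BPF_REG_3, BPF_REG_7, 16, BPF_FUNC_probe_read_kernel)
 * emits roughly the following sequence (r3 is already ARG3, so the
 * first mov is skipped):
 *
 *   r3 += 16                      // src = base + offset
 *   r2 = 8                        // size = BPF_REG_SIZE
 *   r1 = r7                       // dest = stack slot held in r7
 *   call probe_read_kernel
 *   if r0 != 0 goto <error_code>  // offset fixed up by prologue_relocate()
 */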

/*
 * Fast path:
 *   Every argument is a bare register; fetch each one straight from
 *   the pt_regs pointer into the prologue argument registers
 *   (BPF_PROLOGUE_START_ARG_REG + i).
 *
 * BPF_REG_1 must still hold the pointer to 'struct pt_regs'.
 */
static int
gen_prologue_fastpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int i, err = 0;

	for (i = 0; i < nargs; i++) {
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
					   BPF_PROLOGUE_START_ARG_REG + i);
		if (err)
			goto errout;
	}

	return check_pos(pos);
errout:
	return err;
}
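
/*
 * Illustrative sketch (register names are hypothetical, x86_64-style;
 * BPF_PROLOGUE_START_ARG_REG is assumed to be r3 as defined in
 * bpf-prologue.h): for two bare register arguments "di" and "si",
 * the fast path emits just two loads from the pt_regs pointer in r1:
 *
 *   r3 = *(u64 *)(r1 + offsetof(struct pt_regs, di))
 *   r4 = *(u64 *)(r1 + offsetof(struct pt_regs, si))
 *
 * where each offset comes from regs_query_register_offset().
 */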

/*
 * Slow path:
 *   Fetch arguments which need one or more levels of dereference.
 *
 * Each argument gets an 8-byte scratch slot on the BPF stack:
 *
 *   FP - 8  : slot for arg 0
 *   FP - 16 : slot for arg 1
 *   ...
 *
 * For each argument:
 *   1. Load the base register value from pt_regs into ARG3 and
 *      store it into the argument's stack slot.
 *   2. For each level of dereference, call probe_read to copy
 *      BPF_REG_SIZE bytes from (pointer + ref->offset) back into
 *      the same stack slot, reloading the pointer from the slot
 *      between two levels.
 *
 * Finally, load each slot into its destination register
 * (BPF_PROLOGUE_START_ARG_REG + i) using the LDX size derived from
 * the argument type, then jump to the success code.
 *
 * If any probe_read call fails, the generated code jumps to the
 * error code instead; both jump targets are fixed up later by
 * prologue_relocate().
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i, probeid;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* value of base register is stored into ARG3 */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* make r7 point to this argument's stack slot */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
		/* r7 += -8 * (i + 1) */
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);

		/*
		 * Store r3 (base register) onto the stack. FP is the
		 * only base register the verifier accepts for stores
		 * to the stack, so r7 cannot be used as the base
		 * register here.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		ref = arg->ref;
		probeid = BPF_FUNC_probe_read_kernel;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);

			if (ref->user_access)
				probeid = BPF_FUNC_probe_read_user;

			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset, probeid);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;
			/*
			 * Load the previous result into ARG3. Use
			 * BPF_REG_FP instead of r7 because the
			 * verifier allows FP-based addressing only.
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* Final pass: read each stack slot into its register */
	for (i = 0; i < nargs; i++) {
		int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;

		pr_debug("prologue: load arg %d, insn_sz is %s\n",
			 i, insn_sz_to_str(insn_sz));
		ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
	}

	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}
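
/*
 * Illustrative sketch (not from the original source; it assumes
 * BPF_REG_CTX == r6 per linux/filter.h, BPF_PROLOGUE_START_ARG_REG
 * == r3 per bpf-prologue.h, and a hypothetical base register "di"):
 * one argument dereferenced once at offset 8 generates roughly:
 *
 *   r3 = *(u64 *)(r6 + offsetof(struct pt_regs, di))  // base value
 *   r7 = fp; r7 += -8                                 // arg 0 stack slot
 *   *(u64 *)(fp - 8) = r3                             // spill base value
 *   r3 += 8; r2 = 8; r1 = r7                          // gen_read_mem()
 *   call probe_read_kernel
 *   if r0 != 0 goto <error_code>
 *   r3 = *(u64 *)(fp - 8)        // final load, sized from the arg type
 *   goto <success_code>
 */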

static int
prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
		  struct bpf_insn *success_code, struct bpf_insn *user_code)
{
	struct bpf_insn *insn;

	if (check_pos(pos))
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	for (insn = pos->begin; insn < pos->pos; insn++) {
		struct bpf_insn *target;
		u8 class = BPF_CLASS(insn->code);
		u8 opcode;

		if (class != BPF_JMP)
			continue;
		opcode = BPF_OP(insn->code);
		if (opcode == BPF_CALL)
			continue;

		switch (insn->off) {
		case JMP_TO_ERROR_CODE:
			target = error_code;
			break;
		case JMP_TO_SUCCESS_CODE:
			target = success_code;
			break;
		case JMP_TO_USER_CODE:
			target = user_code;
			break;
		default:
			pr_err("bpf prologue: internal error: relocation failed\n");
			return -BPF_LOADER_ERRNO__PROLOGUE;
		}

		insn->off = target - (insn + 1);
	}
	return 0;
}
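
/*
 * Illustrative example of the offset fixup above: if a jump emitted
 * by gen_read_mem() sits at instruction index 4 with insn->off ==
 * JMP_TO_ERROR_CODE and error_code points at index 20, the relocated
 * offset becomes 20 - (4 + 1) = 15, i.e. a relative jump over the 15
 * instructions between the jump and the error handler.
 */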

int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
		      struct bpf_insn *new_prog, size_t *new_cnt,
		      size_t cnt_space)
{
	struct bpf_insn *success_code = NULL;
	struct bpf_insn *error_code = NULL;
	struct bpf_insn *user_code = NULL;
	struct bpf_insn_pos pos;
	bool fastpath = true;
	int err = 0, i;

	if (!new_prog || !new_cnt)
		return -EINVAL;

	if (cnt_space > BPF_MAXINSNS)
		cnt_space = BPF_MAXINSNS;

	pos.begin = new_prog;
	pos.end   = new_prog + cnt_space;
	pos.pos   = new_prog;

	if (!nargs) {
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
		    &pos);

		err = check_pos(&pos);
		if (err)
			goto errout;

		*new_cnt = pos_get_cnt(&pos);
		return 0;
	}

	if (nargs > BPF_PROLOGUE_MAX_ARGS) {
		pr_warning("bpf: prologue: %d arguments are dropped\n",
			   nargs - BPF_PROLOGUE_MAX_ARGS);
		nargs = BPF_PROLOGUE_MAX_ARGS;
	}

	/* First pass: validation */
	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg_ref *ref = args[i].ref;

		if (args[i].value[0] == '@') {
			/* TODO: fetch global variable */
			pr_err("bpf: prologue: global %s%+ld not supported\n",
			       args[i].value, ref ? ref->offset : 0);
			return -ENOTSUP;
		}

		while (ref) {
			/* fastpath is true only if no arg has a ref */
			fastpath = false;

			/*
			 * The instruction encodes the immediate value
			 * using s32, while ref->offset is long. On
			 * systems where long doesn't fit in s32,
			 * refuse to process offsets that are out of
			 * range.
			 */
#ifdef __LP64__
#define OFFSET_MAX	((1LL << 31) - 1)
#define OFFSET_MIN	((1LL << 31) * -1)
			if (ref->offset > OFFSET_MAX ||
			    ref->offset < OFFSET_MIN) {
				pr_err("bpf: prologue: offset out of bounds: %ld\n",
				       ref->offset);
				return -BPF_LOADER_ERRNO__PROLOGUEOOB;
			}
#endif
			ref = ref->next;
		}
	}
	pr_debug("prologue: pass validation\n");

	if (fastpath) {
		/* If all args are bare registers, no deref is needed */
		pr_debug("prologue: fast path\n");
		err = gen_prologue_fastpath(&pos, args, nargs);
		if (err)
			goto errout;
	} else {
		pr_debug("prologue: slow path\n");

		/* Initialization: move ctx to a callee saved register */
		ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);

		err = gen_prologue_slowpath(&pos, args, nargs);
		if (err)
			goto errout;
		/*
		 * start of ERROR CODE (only the slow path needs it):
		 *   set BPF_PROLOGUE_FETCH_RESULT_REG to 1 (error) and
		 *   zero all argument registers (the verifier would
		 *   otherwise complain about reads from uninitialized
		 *   registers), then jump to the user code.
		 */
		error_code = pos.pos;
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
		    &pos);

		for (i = 0; i < nargs; i++)
			ins(BPF_ALU64_IMM(BPF_MOV,
					  BPF_PROLOGUE_START_ARG_REG + i,
					  0),
			    &pos);
		ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
		    &pos);
	}

	/*
	 * start of SUCCESS CODE:
	 *   set BPF_PROLOGUE_FETCH_RESULT_REG to 0 (no error);
	 *   the user code follows directly.
	 */
	success_code = pos.pos;
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);

	/*
	 * start of USER CODE:
	 *   the original program body resumes here.
	 */
	user_code = pos.pos;
	if (!fastpath) {
		/*
		 * Only the slow path needs to restore ctx: in the
		 * fast path, registers are loaded directly from r1.
		 */
		ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
		err = prologue_relocate(&pos, error_code, success_code,
					user_code);
		if (err)
			goto errout;
	}

	err = check_pos(&pos);
	if (err)
		goto errout;

	*new_cnt = pos_get_cnt(&pos);
	return 0;
errout:
	return err;
}
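
/*
 * Usage sketch (hypothetical caller; 'args' and 'nargs' are assumed
 * to come from a parsed probe_trace_event):
 *
 *	struct bpf_insn buf[BPF_MAXINSNS];
 *	size_t prologue_cnt = 0;
 *	int err;
 *
 *	err = bpf__gen_prologue(args, nargs, buf,
 *				&prologue_cnt, BPF_MAXINSNS);
 *	if (!err)
 *		// prepend buf[0..prologue_cnt) to the BPF program
 */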