// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/filter.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"
#include "hashmap.h"
#include "asm/bug.h"

#include <internal/xyarray.h>

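/* This file still calls APIs that libbpf marks deprecated; silence those warnings. */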
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
	int *prologue_fds;
};

struct bpf_perf_object {
	struct list_head list;
	struct bpf_object *obj;
};

struct bpf_preproc_result {
	struct bpf_insn *new_insn_ptr;
	int new_insn_cnt;
};

static LIST_HEAD(bpf_objects_list);
static struct hashmap *bpf_program_hash;
static struct hashmap *bpf_map_hash;

static struct bpf_perf_object *
bpf_perf_object__next(struct bpf_perf_object *prev)
{
	if (!prev) {
		if (list_empty(&bpf_objects_list))
			return NULL;

		return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
	}
	if (list_is_last(&prev->list, &bpf_objects_list))
		return NULL;

	return list_next_entry(prev, list);
}

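/* Walk with a look-ahead cursor so the current object can be freed mid-loop. */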
#define bpf_perf_object__for_each(perf_obj, tmp)	 \
	for ((perf_obj) = bpf_perf_object__next(NULL),	 \
	     (tmp) = bpf_perf_object__next(perf_obj);	 \
	     (perf_obj) != NULL;			 \
	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))

static bool libbpf_initialized;
static int libbpf_sec_handler;
0093
0094 static int bpf_perf_object__add(struct bpf_object *obj)
0095 {
0096 struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
0097
0098 if (perf_obj) {
0099 INIT_LIST_HEAD(&perf_obj->list);
0100 perf_obj->obj = obj;
0101 list_add_tail(&perf_obj->list, &bpf_objects_list);
0102 }
0103 return perf_obj ? 0 : -ENOMEM;
0104 }

static void *program_priv(const struct bpf_program *prog)
{
	void *priv;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return NULL;
	if (!hashmap__find(bpf_program_hash, prog, &priv))
		return NULL;
	return priv;
}

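/*
 * Zero BPF_REG_2..BPF_REG_5 before a generated prologue runs, so the
 * argument registers start from a known state.
 */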
static struct bpf_insn prologue_init_insn[] = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
};

static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
				       struct bpf_prog_load_opts *opts __maybe_unused,
				       long cookie __maybe_unused)
{
	size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
	size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
	struct bpf_prog_priv *priv = program_priv(prog);
	const struct bpf_insn *orig_insn;
	struct bpf_insn *insn;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("bpf: failed to get private field\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (!priv->need_prologue)
		return 0;

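	/* Prepend the zero-init instructions to the program body. */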
	orig_insn = bpf_program__insns(prog);
	orig_insn_cnt = bpf_program__insn_cnt(prog);
	init_size = init_size_cnt * sizeof(*insn);
	orig_size = orig_insn_cnt * sizeof(*insn);

	insn_cnt = orig_insn_cnt + init_size_cnt;
	insn = malloc(insn_cnt * sizeof(*insn));
	if (!insn)
		return -ENOMEM;

	memcpy(insn, prologue_init_insn, init_size);
	memcpy((char *) insn + init_size, orig_insn, orig_size);
	bpf_program__set_insns(prog, insn, insn_cnt);
	return 0;
}

static int libbpf_init(void)
{
	LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
		.prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
	);

	if (libbpf_initialized)
		return 0;

	libbpf_set_print(libbpf_perf_print);
	libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
							  0, &handler_opts);
	if (libbpf_sec_handler < 0) {
		pr_debug("bpf: failed to register libbpf section handler: %d\n",
			 libbpf_sec_handler);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	libbpf_initialized = true;
	return 0;
}

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
	struct bpf_object *obj;
	int err;

	err = libbpf_init();
	if (err)
		return ERR_PTR(err);

	obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	if (bpf_perf_object__add(obj)) {
		bpf_object__close(obj);
		return ERR_PTR(-ENOMEM);
	}

	return obj;
}

static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
{
	list_del(&perf_obj->list);
	bpf_object__close(perf_obj->obj);
	free(perf_obj);
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
	struct bpf_object *obj;
	int err;

	err = libbpf_init();
	if (err)
		return ERR_PTR(err);

	if (source) {
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else {
		obj = bpf_object__open(filename);
	}

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	if (bpf_perf_object__add(obj)) {
		bpf_object__close(obj);
		return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
	}

	return obj;
}

static void close_prologue_programs(struct bpf_prog_priv *priv)
{
	struct perf_probe_event *pev;
	int i, fd;

	if (!priv->need_prologue)
		return;
	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		fd = priv->prologue_fds[i];
		if (fd != -1)
			close(fd);
	}
}

static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	close_prologue_programs(priv);
	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->prologue_fds);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static void bpf_program_hash_free(void)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return;

	hashmap__for_each_entry(bpf_program_hash, cur, bkt)
		clear_prog_priv(cur->key, cur->value);

	hashmap__free(bpf_program_hash);
	bpf_program_hash = NULL;
}

static void bpf_map_hash_free(void);

void bpf__clear(void)
{
	struct bpf_perf_object *perf_obj, *tmp;

	bpf_perf_object__for_each(perf_obj, tmp) {
		bpf__unprobe(perf_obj->obj);
		bpf_perf_object__close(perf_obj);
	}

	bpf_program_hash_free();
	bpf_map_hash_free();
}

static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
{
	return (size_t) __key;
}

static bool ptr_equal(const void *key1, const void *key2,
		      void *ctx __maybe_unused)
{
	return key1 == key2;
}

static int program_set_priv(struct bpf_program *prog, void *priv)
{
	void *old_priv;

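	/*
	 * A failed hashmap__new() below leaves an ERR_PTR in
	 * bpf_program_hash; propagate that error instead of using it.
	 */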
	if (IS_ERR(bpf_program_hash))
		return PTR_ERR(bpf_program_hash);

	if (!bpf_program_hash) {
		bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
		if (IS_ERR(bpf_program_hash))
			return PTR_ERR(bpf_program_hash);
	}

	old_priv = program_priv(prog);
	if (old_priv) {
		clear_prog_priv(prog, old_priv);
		return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
	}
	return hashmap__add(bpf_program_hash, prog, priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key = "exec",
		.usage = "exec=<full path of file>",
		.desc = "Set uprobe target",
		.func = prog_config__exec,
	},
	{
		.key = "module",
		.usage = "module=<module name> ",
		.desc = "Set kprobe module",
		.func = prog_config__module,
	},
	{
		.key = "inlines",
		.usage = "inlines=[yes|no] ",
		.desc = "Probe at inline symbol",
		.func = prog_config__inlines,
	},
	{
		.key = "force",
		.usage = "force=[yes|no] ",
		.desc = "Forcibly add events with existing name",
		.func = prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
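		/* No '=' in the remainder: expect a "sys:event" tracepoint spec. */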
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed; no need to clear pev */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

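	/* Initialize per-program probing settings. */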
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	priv = calloc(1, sizeof(*priv));
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	config_str = bpf_program__section_name(prog);
	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = program_set_priv(prog, priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

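	/*
	 * err and initialized are static: if the first call fails, later
	 * calls keep failing without re-running init_probe_symbol_maps().
	 */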
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     const struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_preproc_result *res)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR_OR_NULL(priv) || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

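	/* Find a tev that belongs to prologue type n. */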
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__section_name(prog);
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
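/*
 * Order trace events so that events whose prologues would be identical
 * sort next to each other: first by argument count, then by each
 * argument's value and dereference chain.
 */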
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
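/*
 * Assign a prologue "type" to each tev in the pev: tevs with identical
 * argument lists share a type.  mapping[i] receives the type of
 * pev->tevs[i] and *nr_types the number of distinct types.
 */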
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int i;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

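	/* No tev takes arguments, so no prologue needs to be generated. */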
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
	if (!priv->prologue_fds) {
		pr_debug("Not enough memory: alloc prologue fds failed\n");
		return -ENOMEM;
	}
	memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	return map_prologue(pev, priv->type_mapping, &priv->nr_types);
}

int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = program_priv(prog);
		if (IS_ERR_OR_NULL(priv)) {
			if (!priv)
				err = -BPF_LOADER_ERRNO__INTERNAL;
			else
				err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
			continue;
		}

		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

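		/*
		 * After probing, decide whether this program needs a
		 * prologue (argument fetcher) and set up the state used
		 * when the object is loaded.
		 */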
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int i;

		if (IS_ERR_OR_NULL(priv) || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

static int bpf_object__load_prologue(struct bpf_object *obj)
{
	int init_cnt = ARRAY_SIZE(prologue_init_insn);
	const struct bpf_insn *orig_insns;
	struct bpf_preproc_result res;
	struct perf_probe_event *pev;
	struct bpf_program *prog;
	int orig_insns_cnt;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int err, i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (!priv->need_prologue)
			continue;

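		/*
		 * For each tev a separate program is loaded: the generated
		 * prologue followed by the original instructions, minus the
		 * zero-init insns prepended at prepare-load time.
		 */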
		orig_insns = bpf_program__insns(prog);
		orig_insns_cnt = bpf_program__insn_cnt(prog);

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
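			/*
			 * Skip the init insns added by
			 * libbpf_prog_prepare_load_fn(); the generated
			 * prologue replaces them.
			 */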
			err = preproc_gen_prologue(prog, i,
						   orig_insns + init_cnt,
						   orig_insns_cnt - init_cnt,
						   &res);
			if (err)
				return err;

			fd = bpf_prog_load(bpf_program__get_type(prog),
					   bpf_program__name(prog), "GPL",
					   res.new_insn_ptr,
					   res.new_insn_cnt, NULL);
			if (fd < 0) {
				char bf[128];

				libbpf_strerror(-errno, bf, sizeof(bf));
				pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
					 -errno, bf);
				return -errno;
			}
			priv->prologue_fds[i] = fd;
		}

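		/* The object-loaded program is superseded; unload it. */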
		bpf_program__unload(prog);
	}
	return 0;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];
		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return bpf_object__load_prologue(obj);
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stopping iteration\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue)
				fd = priv->prologue_fds[i];
			else
				fd = bpf_program__fd(prog);

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: callback failed, stopping iteration\n");
				return err;
			}
		}
	}
	return 0;
}

enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static void *map_priv(const struct bpf_map *map)
{
	void *priv;

	if (IS_ERR_OR_NULL(bpf_map_hash))
		return NULL;
	if (!hashmap__find(bpf_map_hash, map, &priv))
		return NULL;
	return priv;
}

static void bpf_map_hash_free(void)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (IS_ERR_OR_NULL(bpf_map_hash))
		return;

	hashmap__for_each_entry(bpf_map_hash, cur, bkt)
		bpf_map_priv__clear(cur->key, cur->value);

	hashmap__free(bpf_map_hash);
	bpf_map_hash = NULL;
}

static int map_set_priv(struct bpf_map *map, void *priv)
{
	void *old_priv;

	if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
		return PTR_ERR(bpf_map_hash);

	if (!bpf_map_hash) {
		bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
		if (IS_ERR(bpf_map_hash))
			return PTR_ERR(bpf_map_hash);
	}

	old_priv = map_priv(map);
	if (old_priv) {
		bpf_map_priv__clear(map, old_priv);
		return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
	}
	return hashmap__add(bpf_map_hash, map, priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = map_priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (map_set_priv(map, priv)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (bpf_map__value_size(map)) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);

	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		/* PTR_ERR(NULL) would be 0 (success); return a real error. */
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

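	/*
	 * Only the map type needs checking here: the kernel fixes the
	 * key/value sizes of a perf event array.
	 */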
	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= bpf_map__max_entries(map)) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
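	/* term->config has the form "map:<mapname>.<config opt>". */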
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	if (!err)
		*key_scan_pos += strlen(map_opt);

	free(map_name);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map *map,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map *map,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < bpf_map__max_entries(map); i++) {
		err = func(name, map_fd, map, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map *map,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, map, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd, type;
	struct bpf_map_op *op;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = map_priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	if (!map) {
		pr_debug("Map '%s' is invalid\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	type = bpf_map__type(map);
	list_for_each_entry(op, &priv->ops_list, list) {
		switch (type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, map, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, map, op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map *map,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 bpf_map__value_size(map),
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_perf_object *perf_obj, *tmp;
	int err;

	bpf_perf_object__for_each(perf_obj, tmp) {
		err = apply_obj_config_object(perf_obj->obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__perf_for_each_map(map, pobj, tmp)			\
	bpf_perf_object__for_each(pobj, tmp)			\
		bpf_object__for_each_map(map, pobj->obj)

#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)	\
	bpf__perf_for_each_map(map, pobj, pobjtmp)		\
		if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))

struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_perf_object *perf_obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

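		/*
		 * A map with no priv yet still needs configuring; remember
		 * the first configured priv as a template for the rest.
		 */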
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_event(evlist, event_definition);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = evlist__last(evlist);
	}

	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = map_set_priv(map, priv);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}

int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}

#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bounds for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Inherit events are not supported",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point already exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}