0001
0002
0003
0004 #ifdef __KERNEL__
0005 #include <linux/bpf.h>
0006 #include <linux/btf.h>
0007 #include <linux/string.h>
0008 #include <linux/bpf_verifier.h>
0009 #include "relo_core.h"
0010
/* In-kernel build: alias libbpf's btf_kind_str() onto the kernel's
 * btf_type_str() helper so the shared code below can use a single name.
 */
static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}
0015
/* True if @insn is the first half of a double-sized BPF_LD_IMM64 insn. */
static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
0020
/* Resolve @id through any chain of modifiers (const/volatile/restrict) and
 * typedefs; the final type ID is stored in *@res_id when non-NULL. Kernel-side
 * alias for the libbpf helper of the same name.
 */
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}
0026
/* Kernel-side stand-in for libbpf's btf__name_by_offset(). */
static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}
0031
/* Kernel-side stand-in for libbpf's btf__resolve_size(): return the byte size
 * of type @type_id, or a negative errno if the size can't be resolved.
 */
static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}
0043
/* Minimal libbpf logging compatibility shim for the in-kernel build: print
 * levels exist only so shared code compiles; all output goes to the BPF
 * verifier log via bpf_log().
 */
enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

#undef pr_warn
#undef pr_info
#undef pr_debug
/* In this file, every pr_*() call site passes a format whose first %s
 * argument is "prog_name". In the kernel build that slot actually carries the
 * verifier log pointer, so the macros peel it off as the bpf_log() context
 * and substitute an empty string "" to satisfy the %s conversion.
 */
#define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...) bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
0057 #else
0058 #include <stdio.h>
0059 #include <string.h>
0060 #include <errno.h>
0061 #include <ctype.h>
0062 #include <linux/err.h>
0063
0064 #include "libbpf.h"
0065 #include "bpf.h"
0066 #include "btf.h"
0067 #include "str_error.h"
0068 #include "libbpf_internal.h"
0069 #endif
0070
/* Return true if array @arr, reached through accessor @acc, is a flexible
 * array member (zero-sized trailing array of a struct).
 */
static bool is_flex_arr(const struct btf *btf,
			const struct bpf_core_accessor *acc,
			const struct btf_array *arr)
{
	const struct btf_type *t;

	/* not a flexible array, if not inside a struct or has non-zero size */
	if (!acc->name || arr->nelems > 0)
		return false;

	/* has to be the last member of enclosing struct */
	t = btf_type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
}
0085
0086 static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
0087 {
0088 switch (kind) {
0089 case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
0090 case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
0091 case BPF_CORE_FIELD_EXISTS: return "field_exists";
0092 case BPF_CORE_FIELD_SIGNED: return "signed";
0093 case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
0094 case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
0095 case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
0096 case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
0097 case BPF_CORE_TYPE_EXISTS: return "type_exists";
0098 case BPF_CORE_TYPE_MATCHES: return "type_matches";
0099 case BPF_CORE_TYPE_SIZE: return "type_size";
0100 case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
0101 case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
0102 default: return "unknown";
0103 }
0104 }
0105
0106 static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
0107 {
0108 switch (kind) {
0109 case BPF_CORE_FIELD_BYTE_OFFSET:
0110 case BPF_CORE_FIELD_BYTE_SIZE:
0111 case BPF_CORE_FIELD_EXISTS:
0112 case BPF_CORE_FIELD_SIGNED:
0113 case BPF_CORE_FIELD_LSHIFT_U64:
0114 case BPF_CORE_FIELD_RSHIFT_U64:
0115 return true;
0116 default:
0117 return false;
0118 }
0119 }
0120
0121 static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
0122 {
0123 switch (kind) {
0124 case BPF_CORE_TYPE_ID_LOCAL:
0125 case BPF_CORE_TYPE_ID_TARGET:
0126 case BPF_CORE_TYPE_EXISTS:
0127 case BPF_CORE_TYPE_MATCHES:
0128 case BPF_CORE_TYPE_SIZE:
0129 return true;
0130 default:
0131 return false;
0132 }
0133 }
0134
0135 static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
0136 {
0137 switch (kind) {
0138 case BPF_CORE_ENUMVAL_EXISTS:
0139 case BPF_CORE_ENUMVAL_VALUE:
0140 return true;
0141 default:
0142 return false;
0143 }
0144 }
0145
/* Check local and target types for compatibility, as used by type-based
 * CO-RE relocations. Assumes root type names were already matched by the
 * caller. Rules (approximately):
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/ENUM64s of the same kind are
 *     compatible (sizes and member lists are not compared);
 *   - for INTs, size and signedness are ignored, but legacy bitfield-encoded
 *     integers (non-zero bit offset) are rejected;
 *   - PTRs and ARRAYs are compatible if their pointee/element types are;
 *   - FUNC_PROTOs are compatible if arity matches and all argument and
 *     return types are recursively compatible;
 *   - modifiers and typedefs are skipped at every step.
 *
 * Iteration (via the "recur" label) is bounded by @depth for pointer/array
 * chains; genuine recursion for FUNC_PROTO arguments is bounded by @level.
 * Returns 1 on compatible, 0 on incompatible, negative errno on error.
 */
int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
				const struct btf *targ_btf, __u32 targ_id, int level)
{
	const struct btf_type *local_type, *targ_type;
	int depth = 32; /* max pointer/array chasing depth */

	/* caller made sure that names match (ignoring flavor suffix) */
	local_type = btf_type_by_id(local_btf, local_id);
	targ_type = btf_type_by_id(targ_btf, targ_id);
	if (!btf_kind_core_compat(local_type, targ_type))
		return 0;

recur:
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (!btf_kind_core_compat(local_type, targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM64:
		return 1;
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
	case BTF_KIND_PTR:
		local_id = local_type->type;
		targ_id = targ_type->type;
		goto recur;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_type);
		struct btf_param *targ_p = btf_params(targ_type);
		__u16 local_vlen = btf_vlen(local_type);
		__u16 targ_vlen = btf_vlen(targ_type);
		int i, err;

		if (local_vlen != targ_vlen)
			return 0;

		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			/* recursion budget exhausted */
			if (level <= 0)
				return -EINVAL;

			skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
			skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
			err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
							  level - 1);
			if (err <= 0)
				return err;
		}

		/* tail recurse on return type check */
		skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
		skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
		goto recur;
	}
	default:
		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
			btf_kind_str(local_type), local_id, targ_id);
		return 0;
	}
}
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
/* Turn bpf_core_relo into a low- and high-level spec representation,
 * validating correctness along the way, and calculating the resulting field
 * bit offset specified by the accessor string. The low-level spec captures
 * every single level of nestedness, including traversal of anonymous
 * struct/union members. The high-level one captures only semantically
 * meaningful "turning points": named fields and array indices.
 *
 * E.g., accessor string "0:1:2:3" for `&s->a[3]` (where `a` is the third
 * member of an anonymous inner struct) produces low-level spec [0, 1, 2, 3],
 * while the high-level spec records just the initial pointer deref, the
 * named field access, and the array index.
 *
 * Type-based relocations carry no field access; their accessor string must
 * be exactly "0". Enum value-based relocations use the single spec value as
 * an enumerator index.
 *
 * Returns 0 on success, negative errno on malformed/overlong spec.
 */
int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			const struct bpf_core_relo *relo,
			struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name, *spec_str;
	__u32 id, name_off;
	__s64 sz;

	spec_str = btf__name_by_offset(btf, relo->access_str_off);
	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = relo->type_id;
	spec->relo_kind = relo->kind;

	/* type-based relocations don't have a field access string */
	if (core_relo_is_type_based(relo->kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse spec_str, e.g.: "0:1:2:3:4", into raw_spec array */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, relo->type_id, &id);
	if (!t)
		return -EINVAL;

	/* first spec value is always reloc type array index */
	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo->kind)) {
		if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		/* record enumerator name in a first accessor */
		name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off
					  : btf_enum64(t)[access_idx].name_off;
		acc->name = btf__name_by_offset(btf, name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo->kind))
		return -EINVAL;

	/* initial offset is the array-style indexing of the root type */
	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				/* only named members become high-level
				 * accessors; anonymous ones are traversed
				 * silently
				 */
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			/* out-of-bounds index is allowed only for a
			 * flexible array member
			 */
			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
/* Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two PTRs/FLOATs are always compatible;
 *   - for ENUMs/ENUM64s/FWDs, names should match (ignoring flavor suffix)
 *     or at least one of them should be anonymous;
 *   - for INTs, size and signedness are ignored, but bitfield-like integers
 *     (non-zero bit offset) are rejected;
 *   - for ARRAYs, dimensionality is ignored; element types are checked
 *     recursively;
 *   - everything else is incompatible.
 * Returns 1 on compatible, 0 on incompatible, negative errno on error.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (!btf_kind_core_compat(local_type, targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM64:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both have same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		return 0;
	}
}
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
/* Given a single high-level named field accessor in the local type, find the
 * corresponding high-level accessor in a target type. Along the way, maintain
 * the low-level spec for the target and keep updating the target bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all the
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If a field with the
 * desired name is found, local and target types are checked for
 * compatibility before returning.
 *
 * Returns 1 if a matching field is found, 0 if not, <0 on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}
0552
0553
0554
0555
0556
/* Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 * Returns 1 on match, 0 on no match, negative errno on error.
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;
	__u32 name_off;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
			return bpf_core_types_match(local_spec->btf,
						    local_spec->root_type_id,
						    targ_btf, targ_id);
		else
			return bpf_core_types_are_compat(local_spec->btf,
							 local_spec->root_type_id,
							 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_any_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		/* find enumerator with matching flavor-less name */
		for (i = 0; i < btf_vlen(targ_type); i++) {
			if (btf_is_enum(targ_type))
				name_off = btf_enum(targ_type)[i].name_off;
			else
				name_off = btf_enum64(targ_type)[i].name_off;

			targ_name = btf__name_by_offset(targ_spec->btf, name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			/* named field: search (possibly nested) target members */
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as array element
			 * type (because it's the original struct), for others
			 * we should find array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}
0677
/* Compute the value of a field-based relocation for a given spec (local or
 * target). On success, *val holds the relocated value; for byte-offset relos
 * *field_sz/*type_id additionally record the field's size/type for possible
 * load/store memory-size adjustment; *validate (if non-NULL) says whether
 * the instruction's current immediate should be checked against *val.
 * Returns 0 on success, -EUCLEAN to request instruction poisoning (missing
 * target field), other negative errno on error.
 */
static int bpf_core_calc_field_relo(const char *prog_name,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u64 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size adjustment */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog_name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog_name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, relocated values depend on the exact bitfield layout
	 * chosen by the compiler, so by default skip validation of the insn's
	 * expected value; kinds below that are layout-independent re-enable it
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_CORE_FIELD_SIGNED:
		*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_CORE_FIELD_LSHIFT_U64:
		/* left shift that moves the bitfield's MSB to bit 63 of the
		 * u64 the field was loaded into; depends on endianness
		 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
0802
/* Compute the value of a type-based relocation for a given spec (local or
 * target). Returns 0 on success, negative errno on error.
 */
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u64 *val, bool *validate)
{
	__s64 sz;

	/* by default, validate the expected value embedded in bpf_insn */
	if (validate)
		*validate = true;

	/* type-based relos return zero if target type is not found */
	if (!spec) {
		*val = 0;
		return 0;
	}

	switch (relo->kind) {
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID, embedded in bpf_insn, might change during linking,
		 * so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_MATCHES:
		*val = 1;
		break;
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_CORE_TYPE_ID_LOCAL:
	/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
0846
/* Compute the value of an enumerator-value-based relocation for a given spec.
 * Returns 0 on success, -EUCLEAN to request instruction poisoning (enumerator
 * missing in the target), other negative errno on error.
 */
static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
				      const struct bpf_core_spec *spec,
				      __u64 *val)
{
	const struct btf_type *t;

	switch (relo->kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
		*val = spec ? 1 : 0;
		break;
	case BPF_CORE_ENUMVAL_VALUE:
		if (!spec)
			return -EUCLEAN; /* request instruction poisoning */
		t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
		if (btf_is_enum(t))
			*val = btf_enum(t)[spec->spec[0].idx].val;
		else
			*val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
0872
0873
0874
0875
0876
0877
0878
/* Calculate original and target relocation values, given local and target
 * specs and relocation kind. These values are calculated for each candidate.
 * If there are multiple candidates, resulting values should all be consistent
 * with each other. If the instruction has to be poisoned, res->poison is set.
 */
static int bpf_core_calc_relo(const char *prog_name,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;

		/* Validate if it's safe to adjust load/store memory size.
		 * Adjustments are performed only if original and new memory
		 * sizes differ.
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
			 *   - reading a pointer whose kernel-side size differs
			 *     from the BPF-side one (pointer-to-pointer case);
			 *   - reading unsigned integers of differing sizes, as
			 *     loads zero-extend and no sign bit can be lost.
			 * Everything else falls through to the failure mark
			 * below and will poison the insn if it's a load/store.
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment, but this will
			 * only be checked for LDX/STX/ST insns
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* EUCLEAN is used to signal instruction poisoning request */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* EOPNOTSUPP means unknown/unsupported relocation */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}
0964
0965
0966
0967
0968
/* Turn instruction for which relocation failed into an invalid helper call:
 * it will pass the verifier only if it's dead code, otherwise the verifier
 * rejects the program with a recognizable "invalid func" error.
 */
static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
				 int insn_idx, struct bpf_insn *insn)
{
	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
		 prog_name, relo_idx, insn_idx);
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not a dead code),
	 * verifier will complain with something like:
	 * invalid func unknown#195896080
	 * where 195896080 == 0xbad2310 reads as "bad relo"
	 */
	insn->imm = 195896080;
}
0984
/* Translate the BPF_SIZE bits of @insn's opcode into a byte count,
 * or -1 if they don't encode a valid memory access width.
 */
static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	int sz = BPF_SIZE(insn->code);

	if (sz == BPF_DW)
		return 8;
	if (sz == BPF_W)
		return 4;
	if (sz == BPF_H)
		return 2;
	if (sz == BPF_B)
		return 1;
	return -1;
}
0995
/* Translate a byte count into the corresponding BPF_SIZE opcode bits,
 * or -1 if @sz is not a valid memory access width.
 */
static int insn_bytes_to_bpf_size(__u32 sz)
{
	if (sz == 8)
		return BPF_DW;
	if (sz == 4)
		return BPF_W;
	if (sz == 2)
		return BPF_H;
	if (sz == 1)
		return BPF_B;
	return -1;
}
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
/* Patch relocatable BPF instruction.
 *
 * The patched value is given by @res (computed by bpf_core_calc_relo()).
 * When res->validate is set, the instruction's current immediate/offset is
 * first checked against res->orig_val; a mismatch fails with -EINVAL.
 * When res->poison is set, the instruction (and, for ldimm64, its second
 * half) is replaced with a guaranteed-to-fail call.
 *
 * Supported instruction classes:
 *  - ALU/ALU64 with immediate operand (imm is patched);
 *  - LDX/ST/STX memory accesses (off is patched; access size may be adjusted);
 *  - 64-bit immediate load, ldimm64 (the split 64-bit imm is patched).
 *
 * Returns 0 on success, negative errno on validation/format failure.
 */
int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
			int insn_idx, const struct bpf_core_relo *relo,
			int relo_idx, const struct bpf_core_relo_res *res)
{
	__u64 orig_val, new_val;
	__u8 class;

	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison second part of ldimm64 to avoid confusing error from
		 * verifier about "unknown opcode 00"
		 */
		if (is_ldimm64_insn(insn))
			bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n",
				prog_name, relo_idx,
				insn_idx, insn->imm, (unsigned long long)orig_val,
				(unsigned long long)new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)orig_val, (unsigned long long)new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n",
				prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val,
				(unsigned long long)new_val);
			return -EINVAL;
		}
		/* new offset has to fit into insn's signed 16-bit off field */
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n",
				prog_name, relo_idx, insn_idx, (unsigned long long)new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog_name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n",
			 prog_name, relo_idx, insn_idx, (unsigned long long)orig_val,
			 (unsigned long long)new_val);

		/* adjust memory access size, if local and target field sizes differ */
		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog_name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		/* must be a well-formed 2-insn ldimm64 pair */
		if (!is_ldimm64_insn(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog_name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n",
				prog_name, relo_idx,
				insn_idx, (unsigned long long)imm,
				(unsigned long long)orig_val, (unsigned long long)new_val);
			return -EINVAL;
		}

		insn[0].imm = new_val;
		insn[1].imm = new_val >> 32;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)imm, (unsigned long long)new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog_name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}
1151
1152
1153
1154
1155
/* Output spec definition in the format:
 * <kind> [<type-id>] <kind-of-type> <type-name> + <spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b,
 * followed by the raw low-level spec and the bit offset (for field relos).
 * Returns the total number of characters that would have been written
 * (snprintf semantics), so output may be truncated to @buf_sz.
 */
int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const char *s;
	__u32 type_id;
	int i, len = 0;

/* append to @buf with truncation, accumulating the untruncated length in @len */
#define append_buf(fmt, args...)				\
	({							\
		int r;						\
		r = snprintf(buf, buf_sz, fmt, ##args);		\
		len += r;					\
		if (r >= buf_sz)				\
			r = buf_sz;				\
		buf += r;					\
		buf_sz -= r;					\
	})

	type_id = spec->root_type_id;
	t = btf_type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	append_buf("<%s> [%u] %s %s",
		   core_relo_kind_str(spec->relo_kind),
		   type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	if (core_relo_is_type_based(spec->relo_kind))
		return len;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		if (btf_is_enum(t)) {
			const struct btf_enum *e;
			const char *fmt_str;

			e = btf_enum(t) + spec->raw_spec[0];
			s = btf__name_by_offset(spec->btf, e->name_off);
			/* kflag marks a signed enum */
			fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
			append_buf(fmt_str, s, e->val);
		} else {
			const struct btf_enum64 *e;
			const char *fmt_str;

			e = btf_enum64(t) + spec->raw_spec[0];
			s = btf__name_by_offset(spec->btf, e->name_off);
			fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
			append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e));
		}
		return len;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		/* high-level spec as C-like access expression */
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				append_buf(".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				append_buf("[%u]", spec->spec[i].idx);
		}

		/* low-level spec as colon-separated raw indices */
		append_buf(" (");
		for (i = 0; i < spec->raw_len; i++)
			append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
		else
			append_buf(" @ offset %u)", spec->bit_offset / 8);
		return len;
	}

	return len;
#undef append_buf
}
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280 int bpf_core_calc_relo_insn(const char *prog_name,
1281 const struct bpf_core_relo *relo,
1282 int relo_idx,
1283 const struct btf *local_btf,
1284 struct bpf_core_cand_list *cands,
1285 struct bpf_core_spec *specs_scratch,
1286 struct bpf_core_relo_res *targ_res)
1287 {
1288 struct bpf_core_spec *local_spec = &specs_scratch[0];
1289 struct bpf_core_spec *cand_spec = &specs_scratch[1];
1290 struct bpf_core_spec *targ_spec = &specs_scratch[2];
1291 struct bpf_core_relo_res cand_res;
1292 const struct btf_type *local_type;
1293 const char *local_name;
1294 __u32 local_id;
1295 char spec_buf[256];
1296 int i, j, err;
1297
1298 local_id = relo->type_id;
1299 local_type = btf_type_by_id(local_btf, local_id);
1300 local_name = btf__name_by_offset(local_btf, local_type->name_off);
1301 if (!local_name)
1302 return -EINVAL;
1303
1304 err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
1305 if (err) {
1306 const char *spec_str;
1307
1308 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
1309 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
1310 prog_name, relo_idx, local_id, btf_kind_str(local_type),
1311 str_is_empty(local_name) ? "<anon>" : local_name,
1312 spec_str ?: "<?>", err);
1313 return -EINVAL;
1314 }
1315
1316 bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
1317 pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
1318
1319
1320 if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
1321
1322 memset(targ_res, 0, sizeof(*targ_res));
1323 targ_res->validate = false;
1324 targ_res->poison = false;
1325 targ_res->orig_val = local_spec->root_type_id;
1326 targ_res->new_val = local_spec->root_type_id;
1327 return 0;
1328 }
1329
1330
1331 if (str_is_empty(local_name)) {
1332 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
1333 prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
1334 return -EOPNOTSUPP;
1335 }
1336
1337 for (i = 0, j = 0; i < cands->len; i++) {
1338 err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
1339 cands->cands[i].id, cand_spec);
1340 if (err < 0) {
1341 bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
1342 pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
1343 prog_name, relo_idx, i, spec_buf, err);
1344 return err;
1345 }
1346
1347 bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
1348 pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
1349 relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
1350
1351 if (err == 0)
1352 continue;
1353
1354 err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
1355 if (err)
1356 return err;
1357
1358 if (j == 0) {
1359 *targ_res = cand_res;
1360 *targ_spec = *cand_spec;
1361 } else if (cand_spec->bit_offset != targ_spec->bit_offset) {
1362
1363
1364
1365 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
1366 prog_name, relo_idx, cand_spec->bit_offset,
1367 targ_spec->bit_offset);
1368 return -EINVAL;
1369 } else if (cand_res.poison != targ_res->poison ||
1370 cand_res.new_val != targ_res->new_val) {
1371
1372
1373
1374
1375 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n",
1376 prog_name, relo_idx,
1377 cand_res.poison ? "failure" : "success",
1378 (unsigned long long)cand_res.new_val,
1379 targ_res->poison ? "failure" : "success",
1380 (unsigned long long)targ_res->new_val);
1381 return -EINVAL;
1382 }
1383
1384 cands->cands[j++] = cands->cands[i];
1385 }
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395 if (j > 0)
1396 cands->len = j;
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409 if (j == 0) {
1410 pr_debug("prog '%s': relo #%d: no matching targets found\n",
1411 prog_name, relo_idx);
1412
1413
1414 err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
1415 if (err)
1416 return err;
1417 }
1418
1419 return 0;
1420 }
1421
/* Compare two BTF names (given as string offsets into their respective BTFs)
 * by their "essential" prefix, i.e. ignoring any "___suffix" flavor part.
 * An empty/anonymous target name matches only an empty/anonymous local name.
 */
static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off,
				 const struct btf *targ_btf, size_t targ_name_off)
{
	const char *loc_name = btf__name_by_offset(local_btf, local_name_off);
	const char *tgt_name = btf__name_by_offset(targ_btf, targ_name_off);
	size_t loc_essent_len, tgt_essent_len;

	/* anonymous target can only match an anonymous local name */
	if (str_is_empty(tgt_name))
		return str_is_empty(loc_name);

	tgt_essent_len = bpf_core_essential_name_len(tgt_name);
	loc_essent_len = bpf_core_essential_name_len(loc_name);
	if (tgt_essent_len != loc_essent_len)
		return false;

	return strncmp(loc_name, tgt_name, loc_essent_len) == 0;
}
1439
1440 static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t,
1441 const struct btf *targ_btf, const struct btf_type *targ_t)
1442 {
1443 __u16 local_vlen = btf_vlen(local_t);
1444 __u16 targ_vlen = btf_vlen(targ_t);
1445 int i, j;
1446
1447 if (local_t->size != targ_t->size)
1448 return 0;
1449
1450 if (local_vlen > targ_vlen)
1451 return 0;
1452
1453
1454
1455
1456 for (i = 0; i < local_vlen; i++) {
1457 bool matched = false;
1458 __u32 local_n_off, targ_n_off;
1459
1460 local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
1461 btf_enum64(local_t)[i].name_off;
1462
1463 for (j = 0; j < targ_vlen; j++) {
1464 targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
1465 btf_enum64(targ_t)[j].name_off;
1466
1467 if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
1468 matched = true;
1469 break;
1470 }
1471 }
1472
1473 if (!matched)
1474 return 0;
1475 }
1476 return 1;
1477 }
1478
1479 static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t,
1480 const struct btf *targ_btf, const struct btf_type *targ_t,
1481 bool behind_ptr, int level)
1482 {
1483 const struct btf_member *local_m = btf_members(local_t);
1484 __u16 local_vlen = btf_vlen(local_t);
1485 __u16 targ_vlen = btf_vlen(targ_t);
1486 int i, j, err;
1487
1488 if (local_vlen > targ_vlen)
1489 return 0;
1490
1491
1492 for (i = 0; i < local_vlen; i++, local_m++) {
1493 const struct btf_member *targ_m = btf_members(targ_t);
1494 bool matched = false;
1495
1496 for (j = 0; j < targ_vlen; j++, targ_m++) {
1497 if (!bpf_core_names_match(local_btf, local_m->name_off,
1498 targ_btf, targ_m->name_off))
1499 continue;
1500
1501 err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
1502 targ_m->type, behind_ptr, level - 1);
1503 if (err < 0)
1504 return err;
1505 if (err > 0) {
1506 matched = true;
1507 break;
1508 }
1509 }
1510
1511 if (!matched)
1512 return 0;
1513 }
1514 return 1;
1515 }
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
/* Check that local type "matches" target type for CO-RE purposes.
 * Matching is by essential name (after stripping modifiers/typedefs), then
 * kind-specific, iterating/recursing through pointers, arrays and function
 * prototypes.  @behind_ptr relaxes struct/union vs forward-decl matching
 * once a pointer has been traversed.
 *
 * Returns 1 on match, 0 on mismatch, <0 on error (invalid BTF ids, or the
 * @level recursion budget / local depth limit being exhausted).
 */
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
			   __u32 targ_id, bool behind_ptr, int level)
{
	const struct btf_type *local_t, *targ_t;
	int depth = 32; /* bound on the ptr/array/func-proto chain below */
	__u16 local_k, targ_k;

	if (level <= 0)
		return -EINVAL;

	local_t = btf_type_by_id(local_btf, local_id);
	targ_t = btf_type_by_id(targ_btf, targ_id);

recur:
	/* each pass through here follows one ptr/array/return-type link */
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_t || !targ_t)
		return -EINVAL;

	/* name check happens after typedefs are skipped, so root-level
	 * typedefs are still compared by their (essential) names
	 */
	if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
		return 0;

	local_k = btf_kind(local_t);
	targ_k = btf_kind(targ_t);

	switch (local_k) {
	case BTF_KIND_UNKN:
		return local_k == targ_k;
	case BTF_KIND_FWD: {
		/* kflag distinguishes union fwd (set) from struct fwd (clear) */
		bool local_f = BTF_INFO_KFLAG(local_t->info);

		if (behind_ptr) {
			if (local_k == targ_k)
				return local_f == BTF_INFO_KFLAG(targ_t->info);

			/* behind a pointer, a fwd decl may also match a
			 * concrete struct/union of the matching flavor
			 */
			return (targ_k == BTF_KIND_STRUCT && !local_f) ||
			       (targ_k == BTF_KIND_UNION && local_f);
		} else {
			if (local_k != targ_k)
				return 0;

			/* match if both are struct fwds or both union fwds */
			return local_f == BTF_INFO_KFLAG(targ_t->info);
		}
	}
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		/* enum and enum64 are allowed to match each other */
		if (!btf_is_any_enum(targ_t))
			return 0;

		return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		if (behind_ptr) {
			bool targ_f = BTF_INFO_KFLAG(targ_t->info);

			if (local_k == targ_k)
				return 1;

			/* behind a pointer, a concrete struct/union may also
			 * match a target fwd decl of the same flavor
			 */
			if (targ_k != BTF_KIND_FWD)
				return 0;

			return (local_k == BTF_KIND_UNION) == targ_f;
		} else {
			if (local_k != targ_k)
				return 0;

			return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
							 behind_ptr, level);
		}
	case BTF_KIND_INT: {
		__u8 local_sgn;
		__u8 targ_sgn;

		if (local_k != targ_k)
			return 0;

		/* ints must agree on size and signedness only */
		local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
		targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;

		return local_t->size == targ_t->size && local_sgn == targ_sgn;
	}
	case BTF_KIND_PTR:
		if (local_k != targ_k)
			return 0;

		/* everything below a pointer is matched more loosely */
		behind_ptr = true;

		local_id = local_t->type;
		targ_id = targ_t->type;
		goto recur;
	case BTF_KIND_ARRAY: {
		const struct btf_array *local_array = btf_array(local_t);
		const struct btf_array *targ_array = btf_array(targ_t);

		if (local_k != targ_k)
			return 0;

		/* element counts must agree; then match element types */
		if (local_array->nelems != targ_array->nelems)
			return 0;

		local_id = local_array->type;
		targ_id = targ_array->type;
		goto recur;
	}
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_t);
		struct btf_param *targ_p = btf_params(targ_t);
		__u16 local_vlen = btf_vlen(local_t);
		__u16 targ_vlen = btf_vlen(targ_t);
		int i, err;

		if (local_k != targ_k)
			return 0;

		if (local_vlen != targ_vlen)
			return 0;

		/* all argument types must match pairwise */
		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
						     targ_p->type, behind_ptr, level - 1);
			if (err <= 0)
				return err;
		}

		/* finally, match the return type via the recur loop */
		local_id = local_t->type;
		targ_id = targ_t->type;
		goto recur;
	}
	default:
		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
			btf_kind_str(local_t), local_id, targ_id);
		return 0;
	}
}