// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

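/*
 * In-memory representation of the control flow graph: a cfg holds a list
 * of functions ordered by start address, each function holds a list of
 * basic blocks ordered by head instruction, and each basic block keeps
 * separate lists of predecessor and successor edges.
 */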
struct cfg {
    struct list_head funcs;
    int func_num;
};

struct func_node {
    struct list_head l;
    struct list_head bbs;
    struct bpf_insn *start;
    struct bpf_insn *end;
    int idx;
    int bb_num;
};

struct bb_node {
    struct list_head l;
    struct list_head e_prevs;
    struct list_head e_succs;
    struct bpf_insn *head;
    struct bpf_insn *tail;
    int idx;
};

#define EDGE_FLAG_EMPTY     0x0
#define EDGE_FLAG_FALLTHROUGH   0x1
#define EDGE_FLAG_JUMP      0x2
struct edge_node {
    struct list_head l;
    struct bb_node *src;
    struct bb_node *dst;
    int flags;
};

#define ENTRY_BLOCK_INDEX   0
#define EXIT_BLOCK_INDEX    1
#define NUM_FIXED_BLOCKS    2
#define func_prev(func)     list_prev_entry(func, l)
#define func_next(func)     list_next_entry(func, l)
#define bb_prev(bb)     list_prev_entry(bb, l)
#define bb_next(bb)     list_next_entry(bb, l)
#define entry_bb(func)      func_first_bb(func)
#define exit_bb(func)       func_last_bb(func)
#define cfg_first_func(cfg) \
    list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)  \
    list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func) \
    list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)  \
    list_last_entry(&func->bbs, struct bb_node, l)

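/* Look up the function starting at @insn; if none exists yet, allocate a
 * new node and insert it so the list stays ordered by start address.
 * Returns NULL on allocation failure.
 */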
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
    struct func_node *new_func, *func;

    list_for_each_entry(func, &cfg->funcs, l) {
        if (func->start == insn)
            return func;
        else if (func->start > insn)
            break;
    }

    func = func_prev(func);
    new_func = calloc(1, sizeof(*new_func));
    if (!new_func) {
        p_err("OOM when allocating FUNC node");
        return NULL;
    }
    new_func->start = insn;
    new_func->idx = cfg->func_num;
    list_add(&new_func->l, &func->l);
    cfg->func_num++;

    return new_func;
}

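/* Look up the basic block headed by @insn within @func; if none exists yet,
 * allocate a new node and insert it so the list stays ordered by head
 * address. Returns NULL on allocation failure.
 */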
static struct bb_node *func_append_bb(struct func_node *func,
                      struct bpf_insn *insn)
{
    struct bb_node *new_bb, *bb;

    list_for_each_entry(bb, &func->bbs, l) {
        if (bb->head == insn)
            return bb;
        else if (bb->head > insn)
            break;
    }

    bb = bb_prev(bb);
    new_bb = calloc(1, sizeof(*new_bb));
    if (!new_bb) {
        p_err("OOM when allocating BB node");
        return NULL;
    }
    new_bb->head = insn;
    INIT_LIST_HEAD(&new_bb->e_prevs);
    INIT_LIST_HEAD(&new_bb->e_succs);
    list_add(&new_bb->l, &bb->l);

    return new_bb;
}

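/* Allocate an empty basic block (no head or tail instruction) and link it
 * right after @after; used for the synthetic ENTRY and EXIT blocks.
 */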
static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
    struct bb_node *bb;

    bb = calloc(1, sizeof(*bb));
    if (!bb) {
        p_err("OOM when allocating BB node");
        return NULL;
    }

    INIT_LIST_HEAD(&bb->e_prevs);
    INIT_LIST_HEAD(&bb->e_succs);
    list_add(&bb->l, after);

    return bb;
}

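/* Split the instruction range [@cur, @end) into functions: the first
 * instruction starts a function, and so does the target of every
 * BPF-to-BPF (pseudo) call. Returns true on error.
 */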
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
                struct bpf_insn *end)
{
    struct func_node *func, *last_func;

    func = cfg_append_func(cfg, cur);
    if (!func)
        return true;

    for (; cur < end; cur++) {
        if (cur->code != (BPF_JMP | BPF_CALL))
            continue;
        if (cur->src_reg != BPF_PSEUDO_CALL)
            continue;
        func = cfg_append_func(cfg, cur + cur->off + 1);
        if (!func)
            return true;
    }

    last_func = cfg_last_func(cfg);
    last_func->end = end - 1;
    func = cfg_first_func(cfg);
    list_for_each_entry_from(func, &last_func->l, l) {
        func->end = func_next(func)->start - 1;
    }

    return false;
}

static bool is_jmp_insn(__u8 code)
{
    return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}

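/* First partitioning pass: record a basic block head at the function start,
 * at every jump target, and at the instruction following a conditional
 * jump; exit and call instructions are ignored here. Returns true on error.
 */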
static bool func_partition_bb_head(struct func_node *func)
{
    struct bpf_insn *cur, *end;
    struct bb_node *bb;

    cur = func->start;
    end = func->end;
    INIT_LIST_HEAD(&func->bbs);
    bb = func_append_bb(func, cur);
    if (!bb)
        return true;

    for (; cur <= end; cur++) {
        if (is_jmp_insn(cur->code)) {
            __u8 opcode = BPF_OP(cur->code);

            if (opcode == BPF_EXIT || opcode == BPF_CALL)
                continue;

            bb = func_append_bb(func, cur + cur->off + 1);
            if (!bb)
                return true;

            if (opcode != BPF_JA) {
                bb = func_append_bb(func, cur + 1);
                if (!bb)
                    return true;
            }
        }
    }

    return false;
}

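/* Second partitioning pass: with all block heads known, set each block's
 * tail to the instruction preceding the next block's head and assign block
 * indices, starting after the two reserved ENTRY/EXIT indices.
 */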
static void func_partition_bb_tail(struct func_node *func)
{
    unsigned int bb_idx = NUM_FIXED_BLOCKS;
    struct bb_node *bb, *last;

    last = func_last_bb(func);
    last->tail = func->end;
    bb = func_first_bb(func);
    list_for_each_entry_from(bb, &last->l, l) {
        bb->tail = bb_next(bb)->head - 1;
        bb->idx = bb_idx++;
    }

    last->idx = bb_idx++;
    func->bb_num = bb_idx;
}

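/* Insert the synthetic ENTRY block at the front of the block list and the
 * synthetic EXIT block at the end. Returns true on allocation failure.
 */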
static bool func_add_special_bb(struct func_node *func)
{
    struct bb_node *bb;

    bb = func_insert_dummy_bb(&func->bbs);
    if (!bb)
        return true;
    bb->idx = ENTRY_BLOCK_INDEX;

    bb = func_insert_dummy_bb(&func_last_bb(func)->l);
    if (!bb)
        return true;
    bb->idx = EXIT_BLOCK_INDEX;

    return false;
}

static bool func_partition_bb(struct func_node *func)
{
    if (func_partition_bb_head(func))
        return true;

    func_partition_bb_tail(func);

    return false;
}

static struct bb_node *func_search_bb_with_head(struct func_node *func,
                        struct bpf_insn *insn)
{
    struct bb_node *bb;

    list_for_each_entry(bb, &func->bbs, l) {
        if (bb->head == insn)
            return bb;
    }

    return NULL;
}

static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
                  int flags)
{
    struct edge_node *e;

    e = calloc(1, sizeof(*e));
    if (!e) {
        p_err("OOM when allocating edge node");
        return NULL;
    }

    if (src)
        e->src = src;
    if (dst)
        e->dst = dst;

    e->flags |= flags;

    return e;
}

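/* Wire up the edges: ENTRY falls through to the first real block and the
 * last real block falls through to EXIT. For every other block the tail
 * instruction decides the successors: a fall-through edge for non-jumps and
 * exits, a jump edge for unconditional jumps, and both for conditional
 * jumps. Returns true on allocation failure.
 */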
static bool func_add_bb_edges(struct func_node *func)
{
    struct bpf_insn *insn;
    struct edge_node *e;
    struct bb_node *bb;

    bb = entry_bb(func);
    e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
    if (!e)
        return true;
    list_add_tail(&e->l, &bb->e_succs);

    bb = exit_bb(func);
    e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
    if (!e)
        return true;
    list_add_tail(&e->l, &bb->e_prevs);

    bb = entry_bb(func);
    bb = bb_next(bb);
    list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
        e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
        if (!e)
            return true;
        e->src = bb;

        insn = bb->tail;
        if (!is_jmp_insn(insn->code) ||
            BPF_OP(insn->code) == BPF_EXIT) {
            e->dst = bb_next(bb);
            e->flags |= EDGE_FLAG_FALLTHROUGH;
            list_add_tail(&e->l, &bb->e_succs);
            continue;
        } else if (BPF_OP(insn->code) == BPF_JA) {
            e->dst = func_search_bb_with_head(func,
                              insn + insn->off + 1);
            e->flags |= EDGE_FLAG_JUMP;
            list_add_tail(&e->l, &bb->e_succs);
            continue;
        }

        e->dst = bb_next(bb);
        e->flags |= EDGE_FLAG_FALLTHROUGH;
        list_add_tail(&e->l, &bb->e_succs);

        e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
        if (!e)
            return true;
        e->src = bb;
        e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
        list_add_tail(&e->l, &bb->e_succs);
    }

    return false;
}

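/* Build the full CFG for @len bytes of instructions at @insn: partition into
 * functions, then into basic blocks, add the ENTRY/EXIT blocks and wire up
 * the edges. Returns true on error.
 */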
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
    int cnt = len / sizeof(*insn);
    struct func_node *func;

    INIT_LIST_HEAD(&cfg->funcs);

    if (cfg_partition_funcs(cfg, insn, insn + cnt))
        return true;

    list_for_each_entry(func, &cfg->funcs, l) {
        if (func_partition_bb(func) || func_add_special_bb(func))
            return true;

        if (func_add_bb_edges(func))
            return true;
    }

    return false;
}

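/* Free every edge, basic block and function node hanging off @cfg. */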
static void cfg_destroy(struct cfg *cfg)
{
    struct func_node *func, *func2;

    list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
        struct bb_node *bb, *bb2;

        list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
            struct edge_node *e, *e2;

            list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
                list_del(&e->l);
                free(e);
            }

            list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
                list_del(&e->l);
                free(e);
            }

            list_del(&bb->l);
            free(bb);
        }

        list_del(&func->l);
        free(func);
    }
}

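/* Emit one DOT node for @bb: ENTRY/EXIT blocks get a diamond shape, regular
 * blocks a record node labelled with the block's disassembled instructions.
 */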
static void draw_bb_node(struct func_node *func, struct bb_node *bb)
{
    const char *shape;

    if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
        shape = "Mdiamond";
    else
        shape = "record";

    printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
           func->idx, bb->idx, shape);

    if (bb->idx == ENTRY_BLOCK_INDEX) {
        printf("ENTRY");
    } else if (bb->idx == EXIT_BLOCK_INDEX) {
        printf("EXIT");
    } else {
        unsigned int start_idx;
        struct dump_data dd = {};

        printf("{");
        kernel_syms_load(&dd);
        start_idx = bb->head - func->start;
        dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
        kernel_syms_destroy(&dd);
        printf("}");
    }

    printf("\"];\n\n");
}

static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
    const char *style = "\"solid,bold\"";
    const char *color = "black";
    int func_idx = func->idx;
    struct edge_node *e;
    int weight = 10;

    if (list_empty(&bb->e_succs))
        return;

    list_for_each_entry(e, &bb->e_succs, l) {
        printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
               func_idx, e->src->idx, func_idx, e->dst->idx,
               style, color, weight);
        printf("];\n");
    }
}

static void func_output_bb_def(struct func_node *func)
{
    struct bb_node *bb;

    list_for_each_entry(bb, &func->bbs, l) {
        draw_bb_node(func, bb);
    }
}

static void func_output_edges(struct func_node *func)
{
    int func_idx = func->idx;
    struct bb_node *bb;

    list_for_each_entry(bb, &func->bbs, l) {
        draw_bb_succ_edges(func, bb);
    }

    /* Add an invisible edge from ENTRY to EXIT; this improves the graph
     * layout.
     */
    printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
           func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

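/* Emit the whole CFG as a DOT digraph, one dashed cluster per function. */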
static void cfg_dump(struct cfg *cfg)
{
    struct func_node *func;

    printf("digraph \"DOT graph for eBPF program\" {\n");
    list_for_each_entry(func, &cfg->funcs, l) {
        printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
               func->idx, func->idx);
        func_output_bb_def(func);
        func_output_edges(func);
        printf("}\n");
    }
    printf("}\n");
}

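/* Entry point: build the CFG for the translated program image in @buf
 * (@len bytes), dump it as a DOT graph on stdout, then free it.
 */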
void dump_xlated_cfg(void *buf, unsigned int len)
{
    struct bpf_insn *insn = buf;
    struct cfg cfg;

    memset(&cfg, 0, sizeof(cfg));
    if (cfg_build(&cfg, insn, len))
        return;

    cfg_dump(&cfg);

    cfg_destroy(&cfg);
}