0001
0002 #include <linux/moduleloader.h>
0003 #include <linux/workqueue.h>
0004 #include <linux/netdevice.h>
0005 #include <linux/filter.h>
0006 #include <linux/cache.h>
0007 #include <linux/if_vlan.h>
0008
0009 #include <asm/cacheflush.h>
0010 #include <asm/ptrace.h>
0011
0012 #include "bpf_jit_32.h"
0013
/* Report whether VALUE fits in a SPARC signed 13-bit immediate field.
 *
 * Biasing by 0x1000 maps the representable range [-4096, 4095] onto
 * [0, 0x1fff], so one unsigned comparison covers both bounds.
 */
static inline bool is_simm13(unsigned int value)
{
	unsigned int biased = value + 0x1000;

	return biased < 0x2000;
}
0018
/* Usage flags gathered while translating a filter, so that the
 * prologue/epilogue only emit the setup each program really needs.
 */
#define SEEN_DATAREF 1 /* might call external skb-data load helpers */
#define SEEN_XREG    2 /* X register is used */
#define SEEN_MEM     4 /* mem[] scratch words on the stack are used */

/* SPARC V8 instruction field encoders.  An instruction word is built
 * by OR-ing an opcode (OP/OP2/OP3), register fields (RD/RS1/RS2) and,
 * for the immediate form, the IMMED bit plus a signed 13-bit constant.
 */
#define S13(X)		((X) & 0x1fff)	/* signed 13-bit immediate field */
#define IMMED		0x00002000	/* 'i' bit: use immediate, not rs2 */
#define RD(X)		((X) << 25)	/* destination register */
#define RS1(X)		((X) << 14)	/* first source register */
#define RS2(X)		((X))		/* second source register */
#define OP(X)		((X) << 30)	/* major opcode (instruction format) */
#define OP2(X)		((X) << 22)	/* format-2 minor opcode */
#define OP3(X)		((X) << 19)	/* format-3 minor opcode */
#define COND(X)		((X) << 25)	/* branch condition field */
#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))

/* Integer condition codes for format-2 branches. */
#define CONDN		COND(0x0)	/* never */
#define CONDE		COND(0x1)	/* equal */
#define CONDLE		COND(0x2)	/* less or equal (signed) */
#define CONDL		COND(0x3)	/* less (signed) */
#define CONDLEU		COND(0x4)	/* less or equal (unsigned) */
#define CONDCS		COND(0x5)	/* carry set */
#define CONDNEG		COND(0x6)	/* negative */
#define CONDVC		COND(0x7)	/* overflow clear */
#define CONDA		COND(0x8)	/* always */
#define CONDNE		COND(0x9)	/* not equal */
#define CONDG		COND(0xa)	/* greater (signed) */
#define CONDGE		COND(0xb)	/* greater or equal (signed) */
#define CONDGU		COND(0xc)	/* greater (unsigned) */
#define CONDCC		COND(0xd)	/* carry clear */
#define CONDPOS		COND(0xe)	/* positive */
#define CONDVS		COND(0xf)	/* overflow set */

/* Unsigned aliases: ">= unsigned" is carry-clear, "< unsigned" is
 * carry-set.
 */
#define CONDGEU		CONDCC
#define CONDLU		CONDCS

/* 22-bit word displacement for format-2 branches (byte offset / 4). */
#define WDISP22(X)	(((X) >> 2) & 0x3fffff)

/* Conditional branch instructions (op = 0, op2 = 2). */
#define BA		(F2(0, 2) | CONDA)
#define BGU		(F2(0, 2) | CONDGU)
#define BLEU		(F2(0, 2) | CONDLEU)
#define BGEU		(F2(0, 2) | CONDGEU)
#define BLU		(F2(0, 2) | CONDLU)
#define BE		(F2(0, 2) | CONDE)
#define BNE		(F2(0, 2) | CONDNE)

/* On 32-bit sparc a NULL-pointer test is just a 32-bit equality test. */
#define BE_PTR		BE

/* sethi %hi(K), REG */
#define SETHI(K, REG) \
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
/* or REG, %lo(K), REG -- pairs with SETHI to build a full 32-bit const */
#define OR_LO(K, REG) \
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))

/* Format-3 arithmetic/logic and control-transfer opcodes. */
#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)	/* and, setting condition codes */
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)	/* sub, setting condition codes */
#define MUL		F3(2, 0x0a)	/* umul */
#define DIV		F3(2, 0x0e)	/* udiv (reads the %y register) */
#define SLL		F3(2, 0x25)
#define SRL		F3(2, 0x26)
#define JMPL		F3(2, 0x38)
#define CALL		F1(1)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)	/* rd %y, reg */
#define WR_Y		F3(2, 0x30)	/* wr reg, 0, %y */

/* Memory-access opcodes (op = 3). */
#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define ST32		F3(3, 0x04)

/* 32-bit sparc: a pointer load is a 32-bit load, and the minimum
 * stack frame (register window save area + hidden arg slots) is 96
 * bytes.
 */
#define LDPTR		LD32
#define BASE_STACKFRAME	96

/* Immediate-addressing (reg + simm13) forms of the loads/store. */
#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)
0105
/* All emit_* macros append one (or more) instruction words through the
 * local 'prog' cursor of bpf_jit_compile(); several also update its
 * 'seen' flags, which is why they are macros rather than functions.
 */

/* "sethi 0, %g0" is the canonical sparc nop. */
#define emit_nop()		\
do {				\
	*prog++ = SETHI(0, G0);	\
} while (0)

/* A = 0 - A */
#define emit_neg()					\
do {	/* sub %g0, r_A, r_A */			\
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
} while (0)

/* TO = FROM, implemented as "or %g0, FROM, TO". */
#define emit_reg_move(FROM, TO)				\
do {	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
} while (0)

/* REG = 0, implemented as "or %g0, %g0, REG". */
#define emit_clear(REG)					\
do {	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
} while (0)

/* Load an arbitrary 32-bit constant: sethi %hi(K) + or %lo(K). */
#define emit_set_const(K, REG)	\
do {				\
	*prog++ = SETHI(K, REG);	\
				\
	*prog++ = OR_LO(K, REG);	\
} while (0)

/* A = A <OPCODE> X; records that the X register is live. */
#define emit_alu_X(OPCODE)					\
do {								\
	seen |= SEEN_XREG;					\
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
} while (0)

/* A = A <OPCODE> K.
 *
 * K == 0 is a no-op for add/sub/or/xor/shift and is skipped entirely;
 * AND and MUL still have to be emitted since A & 0 / A * 0 clear A.
 * Constants outside simm13 range go through r_TMP.
 */
#define emit_alu_K(OPCODE, K)				\
do {							\
	if (K || OPCODE == AND || OPCODE == MUL) {	\
		unsigned int _insn = OPCODE;		\
		_insn |= RS1(r_A) | RD(r_A);		\
		if (is_simm13(K)) {			\
			*prog++ = _insn | IMMED | S13(K); \
		} else {				\
			emit_set_const(K, r_TMP);	\
			*prog++ = _insn | RS2(r_TMP);	\
		}					\
	}						\
} while (0)

/* DEST = K: one "or %g0, K, DEST" when K fits in simm13, otherwise a
 * sethi/or pair.
 */
#define emit_loadimm(K, DEST)					\
do {								\
	if (is_simm13(K)) {					\
								\
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \
	} else {						\
		emit_set_const(K, DEST);			\
	}							\
} while (0)

/* DEST = *(void **)(BASE + offsetof(STRUCT, FIELD)), with a
 * compile-time check that FIELD really is pointer-sized.
 */
#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(void *));	\
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

/* DEST = 32-bit FIELD of STRUCT at BASE (size-checked). */
#define emit_load32(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u32)); \
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

/* DEST = 16-bit FIELD of STRUCT at BASE (size-checked). */
#define emit_load16(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u16)); \
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

/* Byte load without a size check: used for bitfield carriers like
 * __pkt_type_offset whose declared type is not u8.
 */
#define __emit_load8(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

/* DEST = 8-bit FIELD of STRUCT at BASE (size-checked). */
#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u8));	\
	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
} while (0)

/* BPF scratch word OFF is addressed at [%sp + BIAS - OFF], i.e. at
 * negative offsets from the (already extended) stack pointer.
 */
#define BIAS (-4)

/* DEST = mem[OFF/4] */
#define emit_ldmem(OFF, DEST)					\
do {	*prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \
} while (0)

/* mem[OFF/4] = SRC */
#define emit_stmem(OFF, SRC)					\
do {	*prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \
} while (0)

/* REG = current CPU number: read thread_info->cpu via %g6 (which
 * holds current_thread_info on sparc); on UP kernels it is simply 0.
 */
#ifdef CONFIG_SMP
#define emit_load_cpu(REG)	\
	emit_load32(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG)	emit_clear(REG)
#endif

/* Convenience accessors for sk_buff fields via the r_SKB register. */
#define emit_skb_loadptr(FIELD, DEST) \
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST) \
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST) \
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST) \
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST) \
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

/* Register-indirect jump to BASE + IMM_OFF, saving the PC in LREG. */
#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

/* PC-relative call to FUNC plus delay-slot nop.  addrs[i] is the end
 * offset of instruction i's code; the call word is the 8 bytes before
 * that (call + nop), hence "- 8" to locate it in the final image.
 */
#define emit_call(FUNC)					\
do {	void *_here = image + addrs[i] - 8;		\
	unsigned int _off = (void *)(FUNC) - _here;	\
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
	emit_nop();					\
} while (0)

/* Branch to absolute image offset DEST; _here is this branch's own
 * offset, computed the same way as in emit_call().
 */
#define emit_branch(BR_OPC, DEST)			\
do {	unsigned int _here = addrs[i] - 8;		\
	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
} while (0)

/* Branch by a raw byte displacement OFF (relative to the branch). */
#define emit_branch_off(BR_OPC, OFF)		\
do {	*prog++ = BR_OPC | WDISP22(OFF);	\
} while (0)

/* Unconditional jump ("branch always"). */
#define emit_jump(DEST)		emit_branch(BA, DEST)

/* Access the %y register (used by udiv). */
#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

/* Compare: subcc discarding the result into %g0, only flags remain. */
#define emit_cmp(R1, R2) \
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM) \
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));

/* Bit test: andcc into %g0, setting flags only. */
#define emit_btst(R1, R2) \
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM) \
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));

/* Three-address arithmetic helpers: R3 = R1 op (R2 | IMM). */
#define emit_sub(R1, R2, R3) \
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3) \
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3) \
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3) \
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_and(R1, R2, R3) \
	*prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3))

#define emit_andi(R1, IMM, R3) \
	*prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))

/* Grow/shrink the stack frame by SZ bytes (stack grows down). */
#define emit_alloc_stack(SZ) \
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ) \
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326 void bpf_jit_compile(struct bpf_prog *fp)
0327 {
0328 unsigned int cleanup_addr, proglen, oldproglen = 0;
0329 u32 temp[8], *prog, *func, seen = 0, pass;
0330 const struct sock_filter *filter = fp->insns;
0331 int i, flen = fp->len, pc_ret0 = -1;
0332 unsigned int *addrs;
0333 void *image;
0334
0335 if (!bpf_jit_enable)
0336 return;
0337
0338 addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
0339 if (addrs == NULL)
0340 return;
0341
0342
0343
0344
0345 for (proglen = 0, i = 0; i < flen; i++) {
0346 proglen += 64;
0347 addrs[i] = proglen;
0348 }
0349 cleanup_addr = proglen;
0350 image = NULL;
0351 for (pass = 0; pass < 10; pass++) {
0352 u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
0353
0354
0355 proglen = 0;
0356 prog = temp;
0357
0358
0359 if (seen_or_pass0) {
0360 if (seen_or_pass0 & SEEN_MEM) {
0361 unsigned int sz = BASE_STACKFRAME;
0362 sz += BPF_MEMWORDS * sizeof(u32);
0363 emit_alloc_stack(sz);
0364 }
0365
0366
0367 if (seen_or_pass0 & SEEN_XREG)
0368 emit_clear(r_X);
0369
0370
0371
0372
0373
0374
0375
0376
0377 if (seen_or_pass0 & SEEN_DATAREF) {
0378 emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
0379 emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
0380 emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
0381 emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
0382 }
0383 }
0384 emit_reg_move(O7, r_saved_O7);
0385
0386
0387 if (bpf_needs_clear_a(&filter[0]))
0388 emit_clear(r_A);
0389
0390 for (i = 0; i < flen; i++) {
0391 unsigned int K = filter[i].k;
0392 unsigned int t_offset;
0393 unsigned int f_offset;
0394 u32 t_op, f_op;
0395 u16 code = bpf_anc_helper(&filter[i]);
0396 int ilen;
0397
0398 switch (code) {
0399 case BPF_ALU | BPF_ADD | BPF_X:
0400 emit_alu_X(ADD);
0401 break;
0402 case BPF_ALU | BPF_ADD | BPF_K:
0403 emit_alu_K(ADD, K);
0404 break;
0405 case BPF_ALU | BPF_SUB | BPF_X:
0406 emit_alu_X(SUB);
0407 break;
0408 case BPF_ALU | BPF_SUB | BPF_K:
0409 emit_alu_K(SUB, K);
0410 break;
0411 case BPF_ALU | BPF_AND | BPF_X:
0412 emit_alu_X(AND);
0413 break;
0414 case BPF_ALU | BPF_AND | BPF_K:
0415 emit_alu_K(AND, K);
0416 break;
0417 case BPF_ALU | BPF_OR | BPF_X:
0418 emit_alu_X(OR);
0419 break;
0420 case BPF_ALU | BPF_OR | BPF_K:
0421 emit_alu_K(OR, K);
0422 break;
0423 case BPF_ANC | SKF_AD_ALU_XOR_X:
0424 case BPF_ALU | BPF_XOR | BPF_X:
0425 emit_alu_X(XOR);
0426 break;
0427 case BPF_ALU | BPF_XOR | BPF_K:
0428 emit_alu_K(XOR, K);
0429 break;
0430 case BPF_ALU | BPF_LSH | BPF_X:
0431 emit_alu_X(SLL);
0432 break;
0433 case BPF_ALU | BPF_LSH | BPF_K:
0434 emit_alu_K(SLL, K);
0435 break;
0436 case BPF_ALU | BPF_RSH | BPF_X:
0437 emit_alu_X(SRL);
0438 break;
0439 case BPF_ALU | BPF_RSH | BPF_K:
0440 emit_alu_K(SRL, K);
0441 break;
0442 case BPF_ALU | BPF_MUL | BPF_X:
0443 emit_alu_X(MUL);
0444 break;
0445 case BPF_ALU | BPF_MUL | BPF_K:
0446 emit_alu_K(MUL, K);
0447 break;
0448 case BPF_ALU | BPF_DIV | BPF_K:
0449 if (K == 1)
0450 break;
0451 emit_write_y(G0);
0452
0453
0454
0455
0456 emit_nop();
0457 emit_nop();
0458 emit_nop();
0459 emit_alu_K(DIV, K);
0460 break;
0461 case BPF_ALU | BPF_DIV | BPF_X:
0462 emit_cmpi(r_X, 0);
0463 if (pc_ret0 > 0) {
0464 t_offset = addrs[pc_ret0 - 1];
0465 emit_branch(BE, t_offset + 20);
0466 emit_nop();
0467 } else {
0468 emit_branch_off(BNE, 16);
0469 emit_nop();
0470 emit_jump(cleanup_addr + 20);
0471 emit_clear(r_A);
0472 }
0473 emit_write_y(G0);
0474
0475
0476
0477
0478 emit_nop();
0479 emit_nop();
0480 emit_nop();
0481 emit_alu_X(DIV);
0482 break;
0483 case BPF_ALU | BPF_NEG:
0484 emit_neg();
0485 break;
0486 case BPF_RET | BPF_K:
0487 if (!K) {
0488 if (pc_ret0 == -1)
0489 pc_ret0 = i;
0490 emit_clear(r_A);
0491 } else {
0492 emit_loadimm(K, r_A);
0493 }
0494 fallthrough;
0495 case BPF_RET | BPF_A:
0496 if (seen_or_pass0) {
0497 if (i != flen - 1) {
0498 emit_jump(cleanup_addr);
0499 emit_nop();
0500 break;
0501 }
0502 if (seen_or_pass0 & SEEN_MEM) {
0503 unsigned int sz = BASE_STACKFRAME;
0504 sz += BPF_MEMWORDS * sizeof(u32);
0505 emit_release_stack(sz);
0506 }
0507 }
0508
0509 emit_jmpl(r_saved_O7, 8, G0);
0510 emit_reg_move(r_A, O0);
0511 break;
0512 case BPF_MISC | BPF_TAX:
0513 seen |= SEEN_XREG;
0514 emit_reg_move(r_A, r_X);
0515 break;
0516 case BPF_MISC | BPF_TXA:
0517 seen |= SEEN_XREG;
0518 emit_reg_move(r_X, r_A);
0519 break;
0520 case BPF_ANC | SKF_AD_CPU:
0521 emit_load_cpu(r_A);
0522 break;
0523 case BPF_ANC | SKF_AD_PROTOCOL:
0524 emit_skb_load16(protocol, r_A);
0525 break;
0526 case BPF_ANC | SKF_AD_PKTTYPE:
0527 __emit_skb_load8(__pkt_type_offset, r_A);
0528 emit_andi(r_A, PKT_TYPE_MAX, r_A);
0529 emit_alu_K(SRL, 5);
0530 break;
0531 case BPF_ANC | SKF_AD_IFINDEX:
0532 emit_skb_loadptr(dev, r_A);
0533 emit_cmpi(r_A, 0);
0534 emit_branch(BE_PTR, cleanup_addr + 4);
0535 emit_nop();
0536 emit_load32(r_A, struct net_device, ifindex, r_A);
0537 break;
0538 case BPF_ANC | SKF_AD_MARK:
0539 emit_skb_load32(mark, r_A);
0540 break;
0541 case BPF_ANC | SKF_AD_QUEUE:
0542 emit_skb_load16(queue_mapping, r_A);
0543 break;
0544 case BPF_ANC | SKF_AD_HATYPE:
0545 emit_skb_loadptr(dev, r_A);
0546 emit_cmpi(r_A, 0);
0547 emit_branch(BE_PTR, cleanup_addr + 4);
0548 emit_nop();
0549 emit_load16(r_A, struct net_device, type, r_A);
0550 break;
0551 case BPF_ANC | SKF_AD_RXHASH:
0552 emit_skb_load32(hash, r_A);
0553 break;
0554 case BPF_ANC | SKF_AD_VLAN_TAG:
0555 emit_skb_load16(vlan_tci, r_A);
0556 break;
0557 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
0558 __emit_skb_load8(__pkt_vlan_present_offset, r_A);
0559 if (PKT_VLAN_PRESENT_BIT)
0560 emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
0561 if (PKT_VLAN_PRESENT_BIT < 7)
0562 emit_andi(r_A, 1, r_A);
0563 break;
0564 case BPF_LD | BPF_W | BPF_LEN:
0565 emit_skb_load32(len, r_A);
0566 break;
0567 case BPF_LDX | BPF_W | BPF_LEN:
0568 emit_skb_load32(len, r_X);
0569 break;
0570 case BPF_LD | BPF_IMM:
0571 emit_loadimm(K, r_A);
0572 break;
0573 case BPF_LDX | BPF_IMM:
0574 emit_loadimm(K, r_X);
0575 break;
0576 case BPF_LD | BPF_MEM:
0577 seen |= SEEN_MEM;
0578 emit_ldmem(K * 4, r_A);
0579 break;
0580 case BPF_LDX | BPF_MEM:
0581 seen |= SEEN_MEM | SEEN_XREG;
0582 emit_ldmem(K * 4, r_X);
0583 break;
0584 case BPF_ST:
0585 seen |= SEEN_MEM;
0586 emit_stmem(K * 4, r_A);
0587 break;
0588 case BPF_STX:
0589 seen |= SEEN_MEM | SEEN_XREG;
0590 emit_stmem(K * 4, r_X);
0591 break;
0592
0593 #define CHOOSE_LOAD_FUNC(K, func) \
0594 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
0595
0596 case BPF_LD | BPF_W | BPF_ABS:
0597 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
0598 common_load: seen |= SEEN_DATAREF;
0599 emit_loadimm(K, r_OFF);
0600 emit_call(func);
0601 break;
0602 case BPF_LD | BPF_H | BPF_ABS:
0603 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
0604 goto common_load;
0605 case BPF_LD | BPF_B | BPF_ABS:
0606 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
0607 goto common_load;
0608 case BPF_LDX | BPF_B | BPF_MSH:
0609 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
0610 goto common_load;
0611 case BPF_LD | BPF_W | BPF_IND:
0612 func = bpf_jit_load_word;
0613 common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
0614 if (K) {
0615 if (is_simm13(K)) {
0616 emit_addi(r_X, K, r_OFF);
0617 } else {
0618 emit_loadimm(K, r_TMP);
0619 emit_add(r_X, r_TMP, r_OFF);
0620 }
0621 } else {
0622 emit_reg_move(r_X, r_OFF);
0623 }
0624 emit_call(func);
0625 break;
0626 case BPF_LD | BPF_H | BPF_IND:
0627 func = bpf_jit_load_half;
0628 goto common_load_ind;
0629 case BPF_LD | BPF_B | BPF_IND:
0630 func = bpf_jit_load_byte;
0631 goto common_load_ind;
0632 case BPF_JMP | BPF_JA:
0633 emit_jump(addrs[i + K]);
0634 emit_nop();
0635 break;
0636
0637 #define COND_SEL(CODE, TOP, FOP) \
0638 case CODE: \
0639 t_op = TOP; \
0640 f_op = FOP; \
0641 goto cond_branch
0642
0643 COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
0644 COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
0645 COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
0646 COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
0647 COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
0648 COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
0649 COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
0650 COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
0651
0652 cond_branch: f_offset = addrs[i + filter[i].jf];
0653 t_offset = addrs[i + filter[i].jt];
0654
0655
0656 if (filter[i].jt == filter[i].jf) {
0657 emit_jump(t_offset);
0658 emit_nop();
0659 break;
0660 }
0661
0662 switch (code) {
0663 case BPF_JMP | BPF_JGT | BPF_X:
0664 case BPF_JMP | BPF_JGE | BPF_X:
0665 case BPF_JMP | BPF_JEQ | BPF_X:
0666 seen |= SEEN_XREG;
0667 emit_cmp(r_A, r_X);
0668 break;
0669 case BPF_JMP | BPF_JSET | BPF_X:
0670 seen |= SEEN_XREG;
0671 emit_btst(r_A, r_X);
0672 break;
0673 case BPF_JMP | BPF_JEQ | BPF_K:
0674 case BPF_JMP | BPF_JGT | BPF_K:
0675 case BPF_JMP | BPF_JGE | BPF_K:
0676 if (is_simm13(K)) {
0677 emit_cmpi(r_A, K);
0678 } else {
0679 emit_loadimm(K, r_TMP);
0680 emit_cmp(r_A, r_TMP);
0681 }
0682 break;
0683 case BPF_JMP | BPF_JSET | BPF_K:
0684 if (is_simm13(K)) {
0685 emit_btsti(r_A, K);
0686 } else {
0687 emit_loadimm(K, r_TMP);
0688 emit_btst(r_A, r_TMP);
0689 }
0690 break;
0691 }
0692 if (filter[i].jt != 0) {
0693 if (filter[i].jf)
0694 t_offset += 8;
0695 emit_branch(t_op, t_offset);
0696 emit_nop();
0697 if (filter[i].jf) {
0698 emit_jump(f_offset);
0699 emit_nop();
0700 }
0701 break;
0702 }
0703 emit_branch(f_op, f_offset);
0704 emit_nop();
0705 break;
0706
0707 default:
0708
0709 goto out;
0710 }
0711 ilen = (void *) prog - (void *) temp;
0712 if (image) {
0713 if (unlikely(proglen + ilen > oldproglen)) {
0714 pr_err("bpb_jit_compile fatal error\n");
0715 kfree(addrs);
0716 module_memfree(image);
0717 return;
0718 }
0719 memcpy(image + proglen, temp, ilen);
0720 }
0721 proglen += ilen;
0722 addrs[i] = proglen;
0723 prog = temp;
0724 }
0725
0726
0727
0728 cleanup_addr = proglen - 8;
0729 if (seen_or_pass0 & SEEN_MEM)
0730 cleanup_addr -= 4;
0731
0732 if (image) {
0733 if (proglen != oldproglen)
0734 pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n",
0735 proglen, oldproglen);
0736 break;
0737 }
0738 if (proglen == oldproglen) {
0739 image = module_alloc(proglen);
0740 if (!image)
0741 goto out;
0742 }
0743 oldproglen = proglen;
0744 }
0745
0746 if (bpf_jit_enable > 1)
0747 bpf_jit_dump(flen, proglen, pass + 1, image);
0748
0749 if (image) {
0750 fp->bpf_func = (void *)image;
0751 fp->jited = 1;
0752 }
0753 out:
0754 kfree(addrs);
0755 return;
0756 }
0757
0758 void bpf_jit_free(struct bpf_prog *fp)
0759 {
0760 if (fp->jited)
0761 module_memfree(fp->bpf_func);
0762
0763 bpf_prog_unlock_free(fp);
0764 }