0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105 #include <linux/limits.h>
0106 #include <linux/bitops.h>
0107 #include <linux/errno.h>
0108 #include <linux/filter.h>
0109 #include <linux/bpf.h>
0110 #include <linux/slab.h>
0111 #include <asm/bitops.h>
0112 #include <asm/cacheflush.h>
0113 #include <asm/cpu-features.h>
0114 #include <asm/isa-rev.h>
0115 #include <asm/uasm.h>
0116
0117 #include "bpf_jit_comp.h"
0118
0119
0120 #define CONVERTED(desc) ((desc) & JIT_DESC_CONVERT)
0121 #define INDEX(desc) ((desc) & ~JIT_DESC_CONVERT)
0122
0123
0124
0125
0126
0127 int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
0128 {
0129 int reg;
0130
0131 for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
0132 if (mask & BIT(reg)) {
0133 if ((excl & BIT(reg)) == 0) {
0134 if (sizeof(long) == 4)
0135 emit(ctx, sw, reg, depth, MIPS_R_SP);
0136 else
0137 emit(ctx, sd, reg, depth, MIPS_R_SP);
0138 }
0139 depth += sizeof(long);
0140 }
0141
0142 ctx->stack_used = max((int)ctx->stack_used, depth);
0143 return depth;
0144 }
0145
0146
0147
0148
0149
0150 int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
0151 {
0152 int reg;
0153
0154 for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
0155 if (mask & BIT(reg)) {
0156 if ((excl & BIT(reg)) == 0) {
0157 if (sizeof(long) == 4)
0158 emit(ctx, lw, reg, depth, MIPS_R_SP);
0159 else
0160 emit(ctx, ld, reg, depth, MIPS_R_SP);
0161 }
0162 depth += sizeof(long);
0163 }
0164
0165 return depth;
0166 }
0167
0168
0169 int get_target(struct jit_context *ctx, u32 loc)
0170 {
0171 u32 index = INDEX(ctx->descriptors[loc]);
0172 unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index];
0173 unsigned long addr = (unsigned long)&ctx->target[index];
0174
0175 if (!ctx->target)
0176 return 0;
0177
0178 if ((addr ^ pc) & ~MIPS_JMP_MASK)
0179 return -1;
0180
0181 return addr & MIPS_JMP_MASK;
0182 }
0183
0184
0185 int get_offset(const struct jit_context *ctx, int off)
0186 {
0187 return (INDEX(ctx->descriptors[ctx->bpf_index + off]) -
0188 ctx->jit_index - 1) * sizeof(u32);
0189 }
0190
0191
/* Load a 32-bit immediate into a register */
void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm)
{
	if (imm >= -0x8000 && imm <= 0x7fff) {
		/* Fits in a signed 16-bit immediate: single addiu from $zero */
		emit(ctx, addiu, dst, MIPS_R_ZERO, imm);
	} else {
		/* Load upper half, then OR in the lower half */
		emit(ctx, lui, dst, (s16)((u32)imm >> 16));
		emit(ctx, ori, dst, dst, (u16)(imm & 0xffff));
	}
	clobber_reg(ctx, dst);
}
0202
0203
/* Register-to-register move, implemented as dst = src | 0 */
void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src)
{
	emit(ctx, ori, dst, src, 0);
	clobber_reg(ctx, dst);
}
0209
0210
/*
 * Check that an immediate ALU operation can be emitted with the
 * immediate forms used by emit_alu_i()/rewrite_alu_i().
 */
bool valid_alu_i(u8 op, s32 imm)
{
	switch (BPF_OP(op)) {
	case BPF_NEG:
	case BPF_LSH:
	case BPF_RSH:
	case BPF_ARSH:
		/* All legal eBPF immediate values are valid */
		return true;
	case BPF_ADD:
		/* imm must fit in a signed 16-bit field (addiu) */
		return imm >= -0x8000 && imm <= 0x7fff;
	case BPF_SUB:
		/* -imm must fit in a signed 16-bit field (addiu of -imm) */
		return imm >= -0x7fff && imm <= 0x8000;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* imm must fit in an unsigned 16-bit field (andi/ori/xori) */
		return imm >= 0 && imm <= 0xffff;
	case BPF_MUL:
		/* imm must be zero or a positive power of two (shift) */
		return imm == 0 || (imm > 0 && is_power_of_2(imm));
	case BPF_DIV:
	case BPF_MOD:
		/* imm must be a power of two up to 0x10000 (shift/mask) */
		return (u32)imm <= 0x10000 && is_power_of_2((u32)imm);
	}
	return false;
}
0241
0242
/*
 * Rewrite an immediate ALU operation into a cheaper equivalent.
 * Stores the rewritten opcode in *alu and immediate in *val, and
 * returns false when the operation is a no-op that need not be emitted.
 */
bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val)
{
	bool act = true;

	switch (BPF_OP(op)) {
	case BPF_LSH:
	case BPF_RSH:
	case BPF_ARSH:
	case BPF_ADD:
	case BPF_SUB:
	case BPF_OR:
	case BPF_XOR:
		/* imm == 0 is a no-op for all of these */
		act = imm != 0;
		break;
	case BPF_MUL:
		if (imm == 1) {
			/* dst * 1 is a no-op */
			act = false;
		} else if (imm == 0) {
			/* dst * 0 is zero: rewrite as dst & 0 */
			op = BPF_AND;
		} else {
			/* dst * (1 << n) is dst << n */
			op = BPF_LSH;
			imm = ilog2(abs(imm));
		}
		break;
	case BPF_DIV:
		if (imm == 1) {
			/* dst / 1 is a no-op */
			act = false;
		} else {
			/* dst / (1 << n) is dst >> n */
			op = BPF_RSH;
			imm = ilog2(imm);
		}
		break;
	case BPF_MOD:
		/* dst % (1 << n) is dst & ((1 << n) - 1) */
		op = BPF_AND;
		imm--;
		break;
	}

	*alu = op;
	*val = imm;
	return act;
}
0292
0293
/* ALU operation with a 32-bit immediate operand */
void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = -dst */
	case BPF_NEG:
		emit(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	/* dst = dst & imm */
	case BPF_AND:
		emit(ctx, andi, dst, dst, (u16)imm);
		break;
	/* dst = dst | imm */
	case BPF_OR:
		emit(ctx, ori, dst, dst, (u16)imm);
		break;
	/* dst = dst ^ imm */
	case BPF_XOR:
		emit(ctx, xori, dst, dst, (u16)imm);
		break;
	/* dst = dst << imm */
	case BPF_LSH:
		emit(ctx, sll, dst, dst, imm);
		break;
	/* dst = dst >> imm (logical) */
	case BPF_RSH:
		emit(ctx, srl, dst, dst, imm);
		break;
	/* dst = dst >> imm (arithmetic) */
	case BPF_ARSH:
		emit(ctx, sra, dst, dst, imm);
		break;
	/* dst = dst + imm */
	case BPF_ADD:
		emit(ctx, addiu, dst, dst, imm);
		break;
	/* dst = dst - imm (as addition of the negated immediate) */
	case BPF_SUB:
		emit(ctx, addiu, dst, dst, -imm);
		break;
	}
	clobber_reg(ctx, dst);
}
0336
0337
/* ALU operation with two register operands (32-bit) */
void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = dst & src */
	case BPF_AND:
		emit(ctx, and, dst, dst, src);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ctx, or, dst, dst, src);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ctx, xor, dst, dst, src);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ctx, sllv, dst, dst, src);
		break;
	/* dst = dst >> src (logical) */
	case BPF_RSH:
		emit(ctx, srlv, dst, dst, src);
		break;
	/* dst = dst >> src (arithmetic) */
	case BPF_ARSH:
		emit(ctx, srav, dst, dst, src);
		break;
	/* dst = dst + src */
	case BPF_ADD:
		emit(ctx, addu, dst, dst, src);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit(ctx, subu, dst, dst, src);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		if (cpu_has_mips32r1 || cpu_has_mips32r6) {
			/* Three-operand mul available */
			emit(ctx, mul, dst, dst, src);
		} else {
			/* Fall back to HI/LO multiply */
			emit(ctx, multu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst / src (unsigned) */
	case BPF_DIV:
		if (cpu_has_mips32r6) {
			emit(ctx, divu_r6, dst, dst, src);
		} else {
			/* Quotient is delivered in LO */
			emit(ctx, divu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst % src (unsigned) */
	case BPF_MOD:
		if (cpu_has_mips32r6) {
			emit(ctx, modu, dst, dst, src);
		} else {
			/* Remainder is delivered in HI */
			emit(ctx, divu, dst, src);
			emit(ctx, mfhi, dst);
		}
		break;
	}
	clobber_reg(ctx, dst);
}
0403
0404
/*
 * Atomic read-modify-write of the 32-bit word at dst + off, using an
 * ll/sc retry loop. With BPF_FETCH, the old value is returned in src.
 * Scratch registers T8 (new value) and T9 (old value) are clobbered.
 */
void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code)
{
	LLSC_sync(ctx);
	emit(ctx, ll, MIPS_R_T9, off, dst);
	switch (code) {
	case BPF_ADD:
	case BPF_ADD | BPF_FETCH:
		emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_AND:
	case BPF_AND | BPF_FETCH:
		emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_OR:
	case BPF_OR | BPF_FETCH:
		emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_XOR:
	case BPF_XOR | BPF_FETCH:
		emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_XCHG:
		emit(ctx, move, MIPS_R_T8, src);
		break;
	}
	emit(ctx, sc, MIPS_R_T8, off, dst);
	/* Branch back to the ll and retry if the store-conditional failed */
	emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset);
	emit(ctx, nop);

	if (code & BPF_FETCH) {
		/* Return the pre-modification value in the source register */
		emit(ctx, move, src, MIPS_R_T9);
		clobber_reg(ctx, src);
	}
}
0439
0440
/*
 * Atomic compare-and-exchange of the 32-bit word at dst + off:
 * if the current value equals res, store src; in all cases return the
 * value that was loaded in res. Uses an ll/sc retry loop; scratch
 * registers T8 and T9 are clobbered.
 */
void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off)
{
	LLSC_sync(ctx);
	emit(ctx, ll, MIPS_R_T9, off, dst);
	/* Skip the store if the loaded value does not match the expected */
	emit(ctx, bne, MIPS_R_T9, res, 12);
	emit(ctx, move, MIPS_R_T8, src);
	emit(ctx, sc, MIPS_R_T8, off, dst);
	/* Branch back to the ll and retry if the store-conditional failed */
	emit(ctx, LLSC_beqz, MIPS_R_T8, -20 - LLSC_offset);
	/* Delay slot / fall-through: return the loaded value */
	emit(ctx, move, res, MIPS_R_T9);
	clobber_reg(ctx, res);
}
0452
0453
/* Swap bytes in a register word or half word */
void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width)
{
	u8 tmp = MIPS_R_T8;
	u8 msk = MIPS_R_T9;

	switch (width) {
	/* Swap bytes in a word */
	case 32:
		if (cpu_has_mips32r2 || cpu_has_mips32r6) {
			/* wsbh swaps bytes within each half word */
			emit(ctx, wsbh, dst, dst);
			emit(ctx, rotr, dst, dst, 16);
		} else {
			/* Swap the two half words */
			emit(ctx, sll, tmp, dst, 16);    /* tmp = dst << 16 */
			emit(ctx, srl, dst, dst, 16);    /* dst = dst >> 16 */
			emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp */

			emit(ctx, lui, msk, 0xff);       /* msk = 0x00ff0000 */
			emit(ctx, ori, msk, msk, 0xff);  /* msk = msk | 0xff */

			/* Swap bytes within each half word */
			emit(ctx, and, tmp, dst, msk);   /* tmp = dst & msk */
			emit(ctx, sll, tmp, tmp, 8);     /* tmp = tmp << 8 */
			emit(ctx, srl, dst, dst, 8);     /* dst = dst >> 8 */
			emit(ctx, and, dst, dst, msk);   /* dst = dst & msk */
			emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp */
		}
		break;
	/* Swap bytes in a half word, zeroing the upper half */
	case 16:
		if (cpu_has_mips32r2 || cpu_has_mips32r6) {
			emit(ctx, wsbh, dst, dst);
			emit(ctx, andi, dst, dst, 0xffff);
		} else {
			emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */
			emit(ctx, srl, tmp, tmp, 8);       /* t = t >> 8     */
			emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */
			emit(ctx, sll, dst, dst, 8);       /* d = d << 8     */
			emit(ctx, or, dst, dst, tmp);      /* d = d | t      */
		}
		break;
	}
	clobber_reg(ctx, dst);
}
0496
0497
/*
 * Check that a conditional jump can be emitted with an immediate
 * operand, given the instruction forms used by emit_jmp_i().
 */
bool valid_jmp_i(u8 op, s32 imm)
{
	switch (op) {
	case JIT_JNOP:
		/* The jump is dropped, so the immediate is irrelevant */
		return true;
	case BPF_JEQ:
	case BPF_JNE:
		/* No immediate form for equality; use register compare */
		return false;
	case BPF_JSET:
	case JIT_JNSET:
		/* imm must fit in an unsigned 16-bit field (andi) */
		return imm >= 0 && imm <= 0xffff;
	case BPF_JGE:
	case BPF_JLT:
	case BPF_JSGE:
	case BPF_JSLT:
		/* imm must fit in a signed 16-bit field (slti/sltiu) */
		return imm >= -0x8000 && imm <= 0x7fff;
	case BPF_JGT:
	case BPF_JLE:
	case BPF_JSGT:
	case BPF_JSLE:
		/* imm + 1 must fit in a signed 16-bit field */
		return imm >= -0x8001 && imm <= 0x7ffe;
	}
	return false;
}
0527
0528
0529 static u8 invert_jmp(u8 op)
0530 {
0531 switch (op) {
0532 case BPF_JA: return JIT_JNOP;
0533 case BPF_JEQ: return BPF_JNE;
0534 case BPF_JNE: return BPF_JEQ;
0535 case BPF_JSET: return JIT_JNSET;
0536 case BPF_JGT: return BPF_JLE;
0537 case BPF_JGE: return BPF_JLT;
0538 case BPF_JLT: return BPF_JGE;
0539 case BPF_JLE: return BPF_JGT;
0540 case BPF_JSGT: return BPF_JSLE;
0541 case BPF_JSGE: return BPF_JSLT;
0542 case BPF_JSLT: return BPF_JSGE;
0543 case BPF_JSLE: return BPF_JSGT;
0544 }
0545 return 0;
0546 }
0547
0548
/*
 * Prepare a PC-relative jump operation: compute the branch offset and
 * possibly convert the condition when the target is out of branch range.
 */
static void setup_jmp(struct jit_context *ctx, u8 bpf_op,
		      s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
	u32 *descp = &ctx->descriptors[ctx->bpf_index];
	int op = bpf_op;
	int offset = 0;

	/* Do not compute offsets on the first pass (table not filled) */
	if (INDEX(*descp) == 0)
		goto done;

	/* Skip jumps never taken */
	if (bpf_op == JIT_JNOP)
		goto done;

	/* Convert jumps always taken into absolute jumps */
	if (bpf_op == BPF_JA)
		*descp |= JIT_DESC_CONVERT;

	/*
	 * Compute the branch offset from the offset-table entry of the
	 * next BPF instruction rather than the current JIT index, since
	 * the current index points at the start of the (variable-length)
	 * branch preamble. The + 1 adjusts to the index just after the
	 * branch delay slot.
	 */
	if (!CONVERTED(*descp)) {
		int target = ctx->bpf_index + bpf_off + 1;
		int origin = ctx->bpf_index + 1;

		offset = (INDEX(ctx->descriptors[target]) -
			  INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32);
	}

	/*
	 * The MIPS PC-relative branch byte offset is limited to an
	 * 18-bit signed value. If the computed offset exceeds that, emit
	 * an absolute jump instead, skipped over by an inverted
	 * conditional branch (hence the fixed 3-instruction offset).
	 */
	if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) {
		offset = 3 * sizeof(u32);
		op = invert_jmp(bpf_op);
		/* A newly converted branch forces another iteration */
		ctx->changes += !CONVERTED(*descp);
		*descp |= JIT_DESC_CONVERT;
	}

done:
	*jit_off = offset;
	*jit_op = op;
}
0599
0600
/*
 * Prepare an immediate-operand conditional jump: fold comparisons that
 * are trivially always or never true for the given immediate into
 * BPF_JA / JIT_JNOP, then delegate to setup_jmp().
 */
void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
	bool always = false;
	bool never = false;

	switch (bpf_op) {
	case BPF_JEQ:
	case BPF_JNE:
		break;
	case BPF_JSET:
	case BPF_JLT:
		/* dst & 0 and dst < 0 (unsigned) are never true */
		never = imm == 0;
		break;
	case BPF_JGE:
		/* dst >= 0 (unsigned) is always true */
		always = imm == 0;
		break;
	case BPF_JGT:
		/* dst > U32_MAX is never true */
		never = (u32)imm == U32_MAX;
		break;
	case BPF_JLE:
		/* dst <= U32_MAX is always true */
		always = (u32)imm == U32_MAX;
		break;
	case BPF_JSGT:
		/* dst > S32_MAX is never true for a 32-bit compare */
		never = imm == S32_MAX && width == 32;
		break;
	case BPF_JSGE:
		/* dst >= S32_MIN is always true for a 32-bit compare */
		always = imm == S32_MIN && width == 32;
		break;
	case BPF_JSLT:
		/* dst < S32_MIN is never true for a 32-bit compare */
		never = imm == S32_MIN && width == 32;
		break;
	case BPF_JSLE:
		/* dst <= S32_MAX is always true for a 32-bit compare */
		always = imm == S32_MAX && width == 32;
		break;
	}

	if (never)
		bpf_op = JIT_JNOP;
	if (always)
		bpf_op = BPF_JA;
	setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
}
0644
0645
/*
 * Prepare a register-operand conditional jump: when both operands are
 * the same register, fold reflexive comparisons into BPF_JA (always
 * true) or JIT_JNOP (never true), then delegate to setup_jmp().
 */
void setup_jmp_r(struct jit_context *ctx, bool same_reg,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
	switch (bpf_op) {
	case BPF_JSET:
		break;
	case BPF_JEQ:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JSGE:
	case BPF_JSLE:
		/* Always true when comparing a register with itself */
		if (same_reg)
			bpf_op = BPF_JA;
		break;
	case BPF_JNE:
	case BPF_JLT:
	case BPF_JGT:
	case BPF_JSGT:
	case BPF_JSLT:
		/* Never true when comparing a register with itself */
		if (same_reg)
			bpf_op = JIT_JNOP;
		break;
	}
	setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
}
0671
0672
/*
 * Finish a PC-relative jump operation started by setup_jmp*():
 * emit the branch delay slot and, if the branch was converted,
 * the absolute long jump it guards. Returns -1 if the jump target
 * is out of range for an absolute jump.
 */
int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off)
{
	/* Emit the conditional branch delay slot */
	if (jit_op != JIT_JNOP)
		emit(ctx, nop);

	/*
	 * Emit an absolute long jump with delay slot,
	 * if the PC-relative branch was converted.
	 */
	if (CONVERTED(ctx->descriptors[ctx->bpf_index])) {
		int target = get_target(ctx, ctx->bpf_index + bpf_off + 1);

		if (target < 0)
			return -1;
		emit(ctx, j, target);
		emit(ctx, nop);
	}
	return 0;
}
0692
0693
/* Conditional jump with an immediate operand */
void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op)
{
	switch (op) {
	/* No-op, used internally for branch optimization */
	case JIT_JNOP:
		break;
	/* PC += off if dst & imm */
	case BPF_JSET:
		emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if (dst & imm) == 0 (used for inverted branches) */
	case JIT_JNSET:
		emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst > imm (unsigned), via !(dst < imm + 1) */
	case BPF_JGT:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= imm (unsigned), via !(dst < imm) */
	case BPF_JGE:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < imm (unsigned) */
	case BPF_JLT:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= imm (unsigned), via dst < imm + 1 */
	case BPF_JLE:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst > imm (signed), via !(dst < imm + 1) */
	case BPF_JSGT:
		emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= imm (signed), via !(dst < imm) */
	case BPF_JSGE:
		emit(ctx, slti, MIPS_R_T9, dst, imm);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < imm (signed) */
	case BPF_JSLT:
		emit(ctx, slti, MIPS_R_T9, dst, imm);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= imm (signed), via dst < imm + 1 */
	case BPF_JSLE:
		emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	}
}
0752
0753
/* Conditional jump with two register operands */
void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op)
{
	switch (op) {
	/* No-op, used internally for branch optimization */
	case JIT_JNOP:
		break;
	/* PC += off if dst == src */
	case BPF_JEQ:
		emit(ctx, beq, dst, src, off);
		break;
	/* PC += off if dst != src */
	case BPF_JNE:
		emit(ctx, bne, dst, src, off);
		break;
	/* PC += off if dst & src */
	case BPF_JSET:
		emit(ctx, and, MIPS_R_T9, dst, src);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if (dst & src) == 0 (used for inverted branches) */
	case JIT_JNSET:
		emit(ctx, and, MIPS_R_T9, dst, src);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst > src (unsigned), via src < dst */
	case BPF_JGT:
		emit(ctx, sltu, MIPS_R_T9, src, dst);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= src (unsigned), via !(dst < src) */
	case BPF_JGE:
		emit(ctx, sltu, MIPS_R_T9, dst, src);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < src (unsigned) */
	case BPF_JLT:
		emit(ctx, sltu, MIPS_R_T9, dst, src);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= src (unsigned), via !(src < dst) */
	case BPF_JLE:
		emit(ctx, sltu, MIPS_R_T9, src, dst);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst > src (signed), via src < dst */
	case BPF_JSGT:
		emit(ctx, slt, MIPS_R_T9, src, dst);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= src (signed), via !(dst < src) */
	case BPF_JSGE:
		emit(ctx, slt, MIPS_R_T9, dst, src);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < src (signed) */
	case BPF_JSLT:
		emit(ctx, slt, MIPS_R_T9, dst, src);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= src (signed), via !(src < dst) */
	case BPF_JSLE:
		emit(ctx, slt, MIPS_R_T9, src, dst);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	}
}
0820
0821
/*
 * Unconditional jump (BPF_JA) to the BPF instruction at relative
 * offset @off. Returns -1 if the target is out of jump range.
 */
int emit_ja(struct jit_context *ctx, s16 off)
{
	int target = get_target(ctx, ctx->bpf_index + off + 1);

	if (target < 0)
		return -1;
	emit(ctx, j, target);
	emit(ctx, nop);	/* Delay slot */
	return 0;
}
0832
0833
/*
 * Program exit: jump to the epilogue, whose index is stored in the
 * descriptor one past the last BPF instruction. Returns -1 if the
 * target is out of jump range.
 */
int emit_exit(struct jit_context *ctx)
{
	int target = get_target(ctx, ctx->program->len);

	if (target < 0)
		return -1;
	emit(ctx, j, target);
	emit(ctx, nop);	/* Delay slot */
	return 0;
}
0844
0845
/*
 * Build the program body from eBPF bytecode, one instruction at a time.
 * On passes without a target buffer, also maintain the descriptor table
 * mapping BPF instruction indices to JIT indices, and count offset
 * changes so the caller can iterate to convergence.
 */
static int build_body(struct jit_context *ctx)
{
	const struct bpf_prog *prog = ctx->program;
	unsigned int i;

	ctx->stack_used = 0;
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		u32 *descp = &ctx->descriptors[i];
		int ret;

		/* Record which BPF registers the program touches */
		access_reg(ctx, insn->src_reg);
		access_reg(ctx, insn->dst_reg);

		ctx->bpf_index = i;
		if (ctx->target == NULL) {
			/* Update the offset table, keeping the convert flag */
			ctx->changes += INDEX(*descp) != ctx->jit_index;
			*descp &= JIT_DESC_CONVERT;
			*descp |= ctx->jit_index;
		}

		ret = build_insn(insn, ctx);
		if (ret < 0)
			return ret;

		/* ret > 0 means a double-word insn: skip its second slot */
		if (ret > 0) {
			i++;
			if (ctx->target == NULL)
				descp[1] = ctx->jit_index;
		}
	}

	/* Store the end offset, where the epilogue begins */
	ctx->descriptors[prog->len] = ctx->jit_index;
	return 0;
}
0882
0883
0884 static void set_convert_flag(struct jit_context *ctx, bool enable)
0885 {
0886 const struct bpf_prog *prog = ctx->program;
0887 u32 flag = enable ? JIT_DESC_CONVERT : 0;
0888 unsigned int i;
0889
0890 for (i = 0; i <= prog->len; i++)
0891 ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag;
0892 }
0893
/* Fill unused JIT memory with break instructions that trap on execution */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* uasm_i_break() advances p by one instruction per call */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG);
}
0902
/*
 * Tell the BPF core that this JIT does not implicitly zero-extend
 * 32-bit subregister writes, so the verifier must insert explicit
 * zero-extension instructions where required.
 */
bool bpf_jit_needs_zext(void)
{
	return true;
}
0907
/*
 * JIT-compile an eBPF program for MIPS. On any failure the original
 * program is returned unchanged so the kernel falls back to the
 * interpreter.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header = NULL;
	struct jit_context ctx;
	bool tmp_blinded = false;
	unsigned int tmp_idx;
	unsigned int image_size;
	u8 *image_ptr;
	int tries;

	/*
	 * If BPF JIT was not requested then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	/*
	 * If constant blinding was enabled and we failed during blinding
	 * then we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.program = prog;

	/*
	 * One descriptor per BPF instruction plus one extra entry for
	 * the epilogue offset. If the allocation fails, fall back to
	 * the interpreter.
	 */
	ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors),
				  GFP_KERNEL);
	if (ctx.descriptors == NULL)
		goto out_err;

	/* First pass discovers used registers and stack depth */
	if (build_body(&ctx) < 0)
		goto out_err;

	/*
	 * Iterate until the JIT offsets converge. Each pass may convert
	 * out-of-range PC-relative branches into branch + absolute-jump
	 * sequences, which changes instruction offsets and so may require
	 * another pass. With two tries remaining, force-convert every
	 * branch to guarantee convergence within JIT_MAX_ITERATIONS.
	 */
	ctx.jit_index = 0;
	build_prologue(&ctx);
	tmp_idx = ctx.jit_index;

	tries = JIT_MAX_ITERATIONS;
	do {
		ctx.jit_index = tmp_idx;
		ctx.changes = 0;
		if (tries == 2)
			set_convert_flag(&ctx, true);
		if (build_body(&ctx) < 0)
			goto out_err;
	} while (ctx.changes > 0 && --tries > 0);

	if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge"))
		goto out_err;

	build_epilogue(&ctx, MIPS_R_RA);

	/* Now we know the final image size */
	image_size = sizeof(u32) * ctx.jit_index;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	/*
	 * If we cannot allocate memory for the image, fall back to
	 * the interpreter.
	 */
	if (header == NULL)
		goto out_err;

	/* Final pass: actually emit code into the image */
	ctx.target = (u32 *)image_ptr;
	ctx.jit_index = 0;

	/*
	 * If building the JITed code fails somehow,
	 * we fall back to the interpreter.
	 */
	build_prologue(&ctx);
	if (build_body(&ctx) < 0)
		goto out_err;
	build_epilogue(&ctx, MIPS_R_RA);

	/* Populate line info meta data from the offset table */
	set_convert_flag(&ctx, false);
	bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);

	/* Set as read-only exec and flush the instruction cache */
	bpf_jit_binary_lock_ro(header);
	flush_icache_range((unsigned long)header,
			   (unsigned long)&ctx.target[ctx.jit_index]);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out:
	/* Release the blinded copy or the original, whichever is unused */
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.descriptors);
	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out;
}