// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions common to 32-bit and 64-bit CPUs.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

/*
 * Code overview
 * =============
 *
 * - bpf_jit_comp.h
 *   Common definitions and utilities.
 *
 * - bpf_jit_comp.c
 *   Implementation of JIT top-level logic and exported JIT API functions.
 *   Implementation of internal operations shared by 32-bit and 64-bit code.
 *   JMP and ALU JIT control code, register control code, shared ALU and
 *   JMP/JMP32 JIT operations.
 *
 * - bpf_jit_comp32.c
 *   Implementation of functions to JIT prologue, epilogue and a single eBPF
 *   instruction for 32-bit MIPS CPUs. The functions use shared operations
 *   where possible, and implement the rest for 32-bit MIPS such as ALU64
 *   operations.
 *
 * - bpf_jit_comp64.c
 *   Ditto, for 64-bit MIPS CPUs.
 *
 * Zero and sign extension
 * =======================
 * 32-bit MIPS instructions on 64-bit MIPS registers use sign extension,
 * but the eBPF instruction set mandates zero extension. We let the verifier
 * insert explicit zero-extensions after 32-bit ALU operations, both for
 * 32-bit and 64-bit MIPS JITs. Conditional JMP32 operations on 64-bit MIPS
 * are JITed with sign extensions inserted where expected.
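 *
 * For example (illustrative): on a 64-bit CPU a 32-bit BPF_ADD is emitted
 * as the MIPS "addu" instruction, whose result is sign-extended into bits
 * 32-63, so the verifier-inserted zero extension must then clear the upper
 * word, e.g. with a dinsu-style instruction (one possible encoding):
 *
 *    addu  dst, dst, src        32-bit add, result sign-extended
 *    dinsu dst, zero, 32, 32    clear bits 32-63 (zero extension)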
 *
 * ALU operations
 * ==============
 * ALU operations on 32/64-bit MIPS and ALU64 operations on 64-bit MIPS are
 * JITed in the following steps; a worked example follows the list. ALU64
 * operations on 32-bit MIPS are more complicated and therefore only
 * processed by special implementations in step (3).
 *
 * 1) valid_alu_i:
 *    Determine if an immediate operation can be emitted as such, or if
 *    we must fall back to the register version.
 *
 * 2) rewrite_alu_i:
 *    Convert BPF operation and immediate value to a canonical form for
 *    JITing. In some degenerate cases this form may be a no-op.
 *
 * 3) emit_alu_{i,i64,r,r64}:
 *    Emit instructions for an ALU or ALU64 immediate or register operation.
 *
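 * For example (illustrative): BPF_ALU | BPF_MUL | BPF_K with imm = 8 passes
 * valid_alu_i() since 8 is a power of two, rewrite_alu_i() canonicalizes it
 * to op = BPF_LSH with imm = ilog2(8) = 3, and emit_alu_i() then emits a
 * single "sll dst, dst, 3".
 *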
 * JMP operations
 * ==============
 * JMP and JMP32 operations require a JIT instruction offset table for
 * translating the jump offset. This table is computed by dry-running the
 * JIT without actually emitting anything. However, the computed PC-relative
 * offset may overflow the 18-bit offset field width of the native MIPS
 * branch instruction. In such cases, the long jump is converted into the
 * following sequence.
 *
 *    <branch> !<cond> +2    Inverted PC-relative branch
 *    nop                    Delay slot
 *    j <offset>             Unconditional absolute long jump
 *    nop                    Delay slot
 *
 * Since this converted sequence alters the offset table, all offsets must
 * be re-calculated. This may in turn trigger new branch conversions, so
 * the process is repeated until no further changes are made. Normally it
 * completes in 1-2 iterations. If JIT_MAX_ITERATIONS is reached, we
 * fall back to converting every remaining jump operation. The branch
 * conversion is independent of how the JMP or JMP32 condition is JITed.
 *
 * JMP32 and JMP operations are JITed as follows; a worked example appears
 * after the list.
 *
 * 1) setup_jmp_{i,r}:
 *    Convert jump conditional and offset into a form that can be JITed.
 *    This form may be a no-op, a canonical form, or an inverted PC-relative
 *    jump if branch conversion is necessary.
 *
 * 2) valid_jmp_i:
 *    Determine if an immediate operation can be emitted as such, or if
 *    we must fall back to the register version. Applies to JMP32 for 32-bit
 *    MIPS, and both JMP and JMP32 for 64-bit MIPS.
 *
 * 3) emit_jmp_{i,i64,r,r64}:
 *    Emit instructions for a JMP or JMP32 immediate or register operation.
 *
 * 4) finish_jmp_{i,r}:
 *    Emit any instructions needed to finish the jump. This includes a nop
 *    for the delay slot if a branch was emitted, and a long absolute jump
 *    if the branch was converted.
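 *
 * For example (illustrative): a BPF_JMP | BPF_JEQ | BPF_X instruction whose
 * target lies outside the 18-bit branch range is prepared by setup_jmp_r()
 * with the inverted condition BPF_JNE and a 12-byte offset that skips the
 * jump sequence. emit_jmp_r() then emits "bne dst, src, +12", and
 * finish_jmp() appends the delay-slot nop, the absolute "j <target>" and
 * its delay slot.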
 */

#include <linux/limits.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>

#include "bpf_jit_comp.h"

/* Convenience macros for descriptor access */
#define CONVERTED(desc) ((desc) & JIT_DESC_CONVERT)
#define INDEX(desc) ((desc) & ~JIT_DESC_CONVERT)

/*
 * Push registers on the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be written is returned.
 */
int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
{
    int reg;

    for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
        if (mask & BIT(reg)) {
            if ((excl & BIT(reg)) == 0) {
                if (sizeof(long) == 4)
                    emit(ctx, sw, reg, depth, MIPS_R_SP);
                else /* sizeof(long) == 8 */
                    emit(ctx, sd, reg, depth, MIPS_R_SP);
            }
            depth += sizeof(long);
        }

    ctx->stack_used = max((int)ctx->stack_used, depth);
    return depth;
}

/*
 * Pop registers from the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be read is returned.
 */
int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
{
    int reg;

    for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
        if (mask & BIT(reg)) {
            if ((excl & BIT(reg)) == 0) {
                if (sizeof(long) == 4)
                    emit(ctx, lw, reg, depth, MIPS_R_SP);
                else /* sizeof(long) == 8 */
                    emit(ctx, ld, reg, depth, MIPS_R_SP);
            }
            depth += sizeof(long);
        }

    return depth;
}
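
/*
 * Example (illustrative): on 32-bit MIPS, push_regs(ctx, BIT(MIPS_R_S0) |
 * BIT(MIPS_R_S1), 0, 0) emits "sw s0, 0(sp)" and "sw s1, 4(sp)" and returns
 * 8, the depth for a subsequent push. A register in the exclusion mask is
 * not stored, but its stack slot is still reserved so offsets stay fixed.
 */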

/* Compute the 28-bit jump target address from a BPF program location */
int get_target(struct jit_context *ctx, u32 loc)
{
    u32 index = INDEX(ctx->descriptors[loc]);
    unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index];
    unsigned long addr = (unsigned long)&ctx->target[index];

    if (!ctx->target)
        return 0;

    if ((addr ^ pc) & ~MIPS_JMP_MASK)
        return -1;

    return addr & MIPS_JMP_MASK;
}

/*
 * Compute the PC-relative offset, in bytes, from the instruction after
 * the branch (the delay slot) to a given relative BPF program offset.
 */
int get_offset(const struct jit_context *ctx, int off)
{
    return (INDEX(ctx->descriptors[ctx->bpf_index + off]) -
        ctx->jit_index - 1) * sizeof(u32);
}

/* dst = imm (register width) */
void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm)
{
    if (imm >= -0x8000 && imm <= 0x7fff) {
        emit(ctx, addiu, dst, MIPS_R_ZERO, imm);
    } else {
        emit(ctx, lui, dst, (s16)((u32)imm >> 16));
        emit(ctx, ori, dst, dst, (u16)(imm & 0xffff));
    }
    clobber_reg(ctx, dst);
}
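
/*
 * Example (illustrative): emit_mov_i(ctx, dst, 0x12345678) does not fit in
 * 16 bits, so "lui dst, 0x1234" then "ori dst, dst, 0x5678" is emitted; a
 * small immediate such as -4 becomes a single "addiu dst, zero, -4".
 */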

/* dst = src (register width) */
void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src)
{
    emit(ctx, ori, dst, src, 0);
    clobber_reg(ctx, dst);
}

/* Validate ALU immediate range */
bool valid_alu_i(u8 op, s32 imm)
{
    switch (BPF_OP(op)) {
    case BPF_NEG:
    case BPF_LSH:
    case BPF_RSH:
    case BPF_ARSH:
        /* All legal eBPF values are valid */
        return true;
    case BPF_ADD:
        /* imm must be 16 bits */
        return imm >= -0x8000 && imm <= 0x7fff;
    case BPF_SUB:
        /* -imm must be 16 bits */
        return imm >= -0x7fff && imm <= 0x8000;
    case BPF_AND:
    case BPF_OR:
    case BPF_XOR:
        /* imm must be 16 bits unsigned */
        return imm >= 0 && imm <= 0xffff;
    case BPF_MUL:
        /* imm must be zero or a positive power of two */
        return imm == 0 || (imm > 0 && is_power_of_2(imm));
    case BPF_DIV:
    case BPF_MOD:
        /* imm must be a 17-bit power of two */
        return (u32)imm <= 0x10000 && is_power_of_2((u32)imm);
    }
    return false;
}

/* Rewrite ALU immediate operation */
bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val)
{
    bool act = true;

    switch (BPF_OP(op)) {
    case BPF_LSH:
    case BPF_RSH:
    case BPF_ARSH:
    case BPF_ADD:
    case BPF_SUB:
    case BPF_OR:
    case BPF_XOR:
        /* imm == 0 is a no-op */
        act = imm != 0;
        break;
    case BPF_MUL:
        if (imm == 1) {
            /* dst * 1 is a no-op */
            act = false;
        } else if (imm == 0) {
            /* dst * 0 is dst & 0 */
            op = BPF_AND;
        } else {
            /* dst * (1 << n) is dst << n */
            op = BPF_LSH;
            imm = ilog2(abs(imm));
        }
        break;
    case BPF_DIV:
        if (imm == 1) {
            /* dst / 1 is a no-op */
            act = false;
        } else {
            /* dst / (1 << n) is dst >> n */
            op = BPF_RSH;
            imm = ilog2(imm);
        }
        break;
    case BPF_MOD:
        /* dst % (1 << n) is dst & ((1 << n) - 1) */
        op = BPF_AND;
        imm--;
        break;
    }

    *alu = op;
    *val = imm;
    return act;
}
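
/*
 * Example (illustrative): rewrite_alu_i(BPF_DIV, 4, &alu, &val) yields
 * alu = BPF_RSH and val = 2, and rewrite_alu_i(BPF_MOD, 8, &alu, &val)
 * yields alu = BPF_AND and val = 7; both return true since an instruction
 * still needs to be emitted.
 */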

/* ALU immediate operation (32-bit) */
void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
{
    switch (BPF_OP(op)) {
    /* dst = -dst */
    case BPF_NEG:
        emit(ctx, subu, dst, MIPS_R_ZERO, dst);
        break;
    /* dst = dst & imm */
    case BPF_AND:
        emit(ctx, andi, dst, dst, (u16)imm);
        break;
    /* dst = dst | imm */
    case BPF_OR:
        emit(ctx, ori, dst, dst, (u16)imm);
        break;
    /* dst = dst ^ imm */
    case BPF_XOR:
        emit(ctx, xori, dst, dst, (u16)imm);
        break;
    /* dst = dst << imm */
    case BPF_LSH:
        emit(ctx, sll, dst, dst, imm);
        break;
    /* dst = dst >> imm */
    case BPF_RSH:
        emit(ctx, srl, dst, dst, imm);
        break;
    /* dst = dst >> imm (arithmetic) */
    case BPF_ARSH:
        emit(ctx, sra, dst, dst, imm);
        break;
    /* dst = dst + imm */
    case BPF_ADD:
        emit(ctx, addiu, dst, dst, imm);
        break;
    /* dst = dst - imm */
    case BPF_SUB:
        emit(ctx, addiu, dst, dst, -imm);
        break;
    }
    clobber_reg(ctx, dst);
}

/* ALU register operation (32-bit) */
void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op)
{
    switch (BPF_OP(op)) {
    /* dst = dst & src */
    case BPF_AND:
        emit(ctx, and, dst, dst, src);
        break;
    /* dst = dst | src */
    case BPF_OR:
        emit(ctx, or, dst, dst, src);
        break;
    /* dst = dst ^ src */
    case BPF_XOR:
        emit(ctx, xor, dst, dst, src);
        break;
    /* dst = dst << src */
    case BPF_LSH:
        emit(ctx, sllv, dst, dst, src);
        break;
    /* dst = dst >> src */
    case BPF_RSH:
        emit(ctx, srlv, dst, dst, src);
        break;
    /* dst = dst >> src (arithmetic) */
    case BPF_ARSH:
        emit(ctx, srav, dst, dst, src);
        break;
    /* dst = dst + src */
    case BPF_ADD:
        emit(ctx, addu, dst, dst, src);
        break;
    /* dst = dst - src */
    case BPF_SUB:
        emit(ctx, subu, dst, dst, src);
        break;
    /* dst = dst * src */
    case BPF_MUL:
        if (cpu_has_mips32r1 || cpu_has_mips32r6) {
            emit(ctx, mul, dst, dst, src);
        } else {
            emit(ctx, multu, dst, src);
            emit(ctx, mflo, dst);
        }
        break;
    /* dst = dst / src */
    case BPF_DIV:
        if (cpu_has_mips32r6) {
            emit(ctx, divu_r6, dst, dst, src);
        } else {
            emit(ctx, divu, dst, src);
            emit(ctx, mflo, dst);
        }
        break;
    /* dst = dst % src */
    case BPF_MOD:
        if (cpu_has_mips32r6) {
            emit(ctx, modu, dst, dst, src);
        } else {
            emit(ctx, divu, dst, src);
            emit(ctx, mfhi, dst);
        }
        break;
    }
    clobber_reg(ctx, dst);
}

/* Atomic read-modify-write (32-bit) */
void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code)
{
    LLSC_sync(ctx);
    emit(ctx, ll, MIPS_R_T9, off, dst);
    switch (code) {
    case BPF_ADD:
    case BPF_ADD | BPF_FETCH:
        emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src);
        break;
    case BPF_AND:
    case BPF_AND | BPF_FETCH:
        emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src);
        break;
    case BPF_OR:
    case BPF_OR | BPF_FETCH:
        emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src);
        break;
    case BPF_XOR:
    case BPF_XOR | BPF_FETCH:
        emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src);
        break;
    case BPF_XCHG:
        emit(ctx, move, MIPS_R_T8, src);
        break;
    }
    emit(ctx, sc, MIPS_R_T8, off, dst);
    emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset);
    emit(ctx, nop); /* Delay slot */

    if (code & BPF_FETCH) {
        emit(ctx, move, src, MIPS_R_T9);
        clobber_reg(ctx, src);
    }
}
0440 /* Atomic compare-and-exchange (32-bit) */
0441 void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off)
0442 {
0443     LLSC_sync(ctx);
0444     emit(ctx, ll, MIPS_R_T9, off, dst);
0445     emit(ctx, bne, MIPS_R_T9, res, 12);
0446     emit(ctx, move, MIPS_R_T8, src);     /* Delay slot */
0447     emit(ctx, sc, MIPS_R_T8, off, dst);
0448     emit(ctx, LLSC_beqz, MIPS_R_T8, -20 - LLSC_offset);
0449     emit(ctx, move, res, MIPS_R_T9);     /* Delay slot */
0450     clobber_reg(ctx, res);
0451 }
0452 
0453 /* Swap bytes and truncate a register word or half word */
0454 void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width)
0455 {
0456     u8 tmp = MIPS_R_T8;
0457     u8 msk = MIPS_R_T9;
0458 
0459     switch (width) {
0460     /* Swap bytes in a word */
0461     case 32:
0462         if (cpu_has_mips32r2 || cpu_has_mips32r6) {
0463             emit(ctx, wsbh, dst, dst);
0464             emit(ctx, rotr, dst, dst, 16);
0465         } else {
0466             emit(ctx, sll, tmp, dst, 16);    /* tmp  = dst << 16 */
0467             emit(ctx, srl, dst, dst, 16);    /* dst = dst >> 16  */
0468             emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp  */
0469 
0470             emit(ctx, lui, msk, 0xff);       /* msk = 0x00ff0000 */
0471             emit(ctx, ori, msk, msk, 0xff);  /* msk = msk | 0xff */
0472 
0473             emit(ctx, and, tmp, dst, msk);   /* tmp = dst & msk  */
0474             emit(ctx, sll, tmp, tmp, 8);     /* tmp = tmp << 8   */
0475             emit(ctx, srl, dst, dst, 8);     /* dst = dst >> 8   */
0476             emit(ctx, and, dst, dst, msk);   /* dst = dst & msk  */
            emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp  */
        }
        break;
    /* Swap bytes in a half word */
    case 16:
        if (cpu_has_mips32r2 || cpu_has_mips32r6) {
            emit(ctx, wsbh, dst, dst);
            emit(ctx, andi, dst, dst, 0xffff);
        } else {
            emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */
            emit(ctx, srl, tmp, tmp, 8);       /* t = t >> 8     */
            emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */
            emit(ctx, sll, dst, dst, 8);       /* d = d << 8     */
            emit(ctx, or,  dst, dst, tmp);     /* d = d | t      */
        }
        break;
    }
    clobber_reg(ctx, dst);
}

/* Validate jump immediate range */
bool valid_jmp_i(u8 op, s32 imm)
{
    switch (op) {
    case JIT_JNOP:
        /* Immediate value not used */
        return true;
    case BPF_JEQ:
    case BPF_JNE:
        /* No immediate operation */
        return false;
    case BPF_JSET:
    case JIT_JNSET:
        /* imm must be 16 bits unsigned */
        return imm >= 0 && imm <= 0xffff;
    case BPF_JGE:
    case BPF_JLT:
    case BPF_JSGE:
    case BPF_JSLT:
        /* imm must be 16 bits */
        return imm >= -0x8000 && imm <= 0x7fff;
    case BPF_JGT:
    case BPF_JLE:
    case BPF_JSGT:
    case BPF_JSLE:
        /* imm + 1 must be 16 bits */
        return imm >= -0x8001 && imm <= 0x7ffe;
    }
    return false;
}
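
/*
 * Note (illustrative): the BPF_JGT/JLE/JSGT/JSLE range above mirrors how
 * emit_jmp_i() implements these conditionals with "slti(u) t9, dst, imm + 1",
 * so it is imm + 1, not imm, that must fit in the signed 16-bit field.
 */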

/* Invert a conditional jump operation */
static u8 invert_jmp(u8 op)
{
    switch (op) {
    case BPF_JA: return JIT_JNOP;
    case BPF_JEQ: return BPF_JNE;
    case BPF_JNE: return BPF_JEQ;
    case BPF_JSET: return JIT_JNSET;
    case BPF_JGT: return BPF_JLE;
    case BPF_JGE: return BPF_JLT;
    case BPF_JLT: return BPF_JGE;
    case BPF_JLE: return BPF_JGT;
    case BPF_JSGT: return BPF_JSLE;
    case BPF_JSGE: return BPF_JSLT;
    case BPF_JSLT: return BPF_JSGE;
    case BPF_JSLE: return BPF_JSGT;
    }
    return 0;
}

/* Prepare a PC-relative jump operation */
static void setup_jmp(struct jit_context *ctx, u8 bpf_op,
              s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
    u32 *descp = &ctx->descriptors[ctx->bpf_index];
    int op = bpf_op;
    int offset = 0;

    /* Do not compute offsets on the first pass */
    if (INDEX(*descp) == 0)
        goto done;

    /* Skip jumps never taken */
    if (bpf_op == JIT_JNOP)
        goto done;

    /* Convert jumps always taken */
    if (bpf_op == BPF_JA)
        *descp |= JIT_DESC_CONVERT;

    /*
     * Current ctx->jit_index points to the start of the branch preamble.
     * Since the preamble differs among different branch conditionals,
     * the current index cannot be used to compute the branch offset.
     * Instead, we use the offset table value for the next instruction,
     * which gives the index immediately after the branch delay slot.
     */
    if (!CONVERTED(*descp)) {
        int target = ctx->bpf_index + bpf_off + 1;
        int origin = ctx->bpf_index + 1;

        offset = (INDEX(ctx->descriptors[target]) -
              INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32);
    }

    /*
     * The PC-relative branch offset field on MIPS is 18 bits signed,
     * so if the computed offset is larger than this we generate an
     * absolute jump that we skip with an inverted conditional branch.
     */
    if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) {
        offset = 3 * sizeof(u32);
        op = invert_jmp(bpf_op);
        ctx->changes += !CONVERTED(*descp);
        *descp |= JIT_DESC_CONVERT;
    }

done:
    *jit_off = offset;
    *jit_op = op;
}

/* Prepare a PC-relative jump operation with immediate conditional */
void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
         u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
    bool always = false;
    bool never = false;

    switch (bpf_op) {
    case BPF_JEQ:
    case BPF_JNE:
        break;
    case BPF_JSET:
    case BPF_JLT:
        never = imm == 0;
        break;
    case BPF_JGE:
        always = imm == 0;
        break;
    case BPF_JGT:
        never = (u32)imm == U32_MAX;
        break;
    case BPF_JLE:
        always = (u32)imm == U32_MAX;
        break;
    case BPF_JSGT:
        never = imm == S32_MAX && width == 32;
        break;
    case BPF_JSGE:
        always = imm == S32_MIN && width == 32;
        break;
    case BPF_JSLT:
        never = imm == S32_MIN && width == 32;
        break;
    case BPF_JSLE:
        always = imm == S32_MAX && width == 32;
        break;
    }

    if (never)
        bpf_op = JIT_JNOP;
    if (always)
        bpf_op = BPF_JA;
    setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
}
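
/*
 * Example (illustrative): BPF_JLT with imm == 0 can never be taken, since
 * no unsigned value is below zero, so it degenerates to JIT_JNOP; BPF_JGE
 * with imm == 0 is always taken and becomes an unconditional BPF_JA.
 */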

/* Prepare a PC-relative jump operation with register conditional */
void setup_jmp_r(struct jit_context *ctx, bool same_reg,
         u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
    switch (bpf_op) {
    case BPF_JSET:
        break;
    case BPF_JEQ:
    case BPF_JGE:
    case BPF_JLE:
    case BPF_JSGE:
    case BPF_JSLE:
        if (same_reg)
            bpf_op = BPF_JA;
        break;
    case BPF_JNE:
    case BPF_JLT:
    case BPF_JGT:
    case BPF_JSGT:
    case BPF_JSLT:
        if (same_reg)
            bpf_op = JIT_JNOP;
        break;
    }
    setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
}

/* Finish a PC-relative jump operation */
int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off)
{
    /* Emit conditional branch delay slot */
    if (jit_op != JIT_JNOP)
        emit(ctx, nop);
    /*
     * Emit an absolute long jump with delay slot,
     * if the PC-relative branch was converted.
     */
    if (CONVERTED(ctx->descriptors[ctx->bpf_index])) {
        int target = get_target(ctx, ctx->bpf_index + bpf_off + 1);

        if (target < 0)
            return -1;
        emit(ctx, j, target);
        emit(ctx, nop);
    }
    return 0;
}

/* Jump immediate (32-bit) */
void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op)
{
    switch (op) {
    /* No-op, used internally for branch optimization */
    case JIT_JNOP:
        break;
    /* PC += off if dst & imm */
    case BPF_JSET:
        emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */
    case JIT_JNSET:
        emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst > imm */
    case BPF_JGT:
        emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst >= imm */
    case BPF_JGE:
        emit(ctx, sltiu, MIPS_R_T9, dst, imm);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst < imm */
    case BPF_JLT:
        emit(ctx, sltiu, MIPS_R_T9, dst, imm);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst <= imm */
    case BPF_JLE:
        emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst > imm (signed) */
    case BPF_JSGT:
        emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst >= imm (signed) */
    case BPF_JSGE:
        emit(ctx, slti, MIPS_R_T9, dst, imm);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst < imm (signed) */
    case BPF_JSLT:
        emit(ctx, slti, MIPS_R_T9, dst, imm);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst <= imm (signed) */
    case BPF_JSLE:
        emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    }
}
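
/*
 * Note (illustrative): MIPS branches compare registers only for equality
 * or against zero, so the magnitude tests above first materialize the
 * condition in t9 with slti/sltiu and then branch with bnez/beqz; e.g.
 * "dst >= imm" is !(dst < imm), hence "sltiu t9, dst, imm" + "beqz t9, off".
 */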

/* Jump register (32-bit) */
void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op)
{
    switch (op) {
    /* No-op, used internally for branch optimization */
    case JIT_JNOP:
        break;
    /* PC += off if dst == src */
    case BPF_JEQ:
        emit(ctx, beq, dst, src, off);
        break;
    /* PC += off if dst != src */
    case BPF_JNE:
        emit(ctx, bne, dst, src, off);
        break;
    /* PC += off if dst & src */
    case BPF_JSET:
        emit(ctx, and, MIPS_R_T9, dst, src);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if (dst & src) == 0 (not in BPF, used for long jumps) */
    case JIT_JNSET:
        emit(ctx, and, MIPS_R_T9, dst, src);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst > src */
    case BPF_JGT:
        emit(ctx, sltu, MIPS_R_T9, src, dst);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst >= src */
    case BPF_JGE:
        emit(ctx, sltu, MIPS_R_T9, dst, src);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst < src */
    case BPF_JLT:
        emit(ctx, sltu, MIPS_R_T9, dst, src);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst <= src */
    case BPF_JLE:
        emit(ctx, sltu, MIPS_R_T9, src, dst);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst > src (signed) */
    case BPF_JSGT:
        emit(ctx, slt, MIPS_R_T9, src, dst);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst >= src (signed) */
    case BPF_JSGE:
        emit(ctx, slt, MIPS_R_T9, dst, src);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    /* PC += off if dst < src (signed) */
    case BPF_JSLT:
        emit(ctx, slt, MIPS_R_T9, dst, src);
        emit(ctx, bnez, MIPS_R_T9, off);
        break;
    /* PC += off if dst <= src (signed) */
    case BPF_JSLE:
        emit(ctx, slt, MIPS_R_T9, src, dst);
        emit(ctx, beqz, MIPS_R_T9, off);
        break;
    }
}

/* Jump always */
int emit_ja(struct jit_context *ctx, s16 off)
{
    int target = get_target(ctx, ctx->bpf_index + off + 1);

    if (target < 0)
        return -1;
    emit(ctx, j, target);
    emit(ctx, nop);
    return 0;
}

/* Jump to epilogue */
int emit_exit(struct jit_context *ctx)
{
    int target = get_target(ctx, ctx->program->len);

    if (target < 0)
        return -1;
    emit(ctx, j, target);
    emit(ctx, nop);
    return 0;
}

/* Build the program body from eBPF bytecode */
static int build_body(struct jit_context *ctx)
{
    const struct bpf_prog *prog = ctx->program;
    unsigned int i;

    ctx->stack_used = 0;
    for (i = 0; i < prog->len; i++) {
        const struct bpf_insn *insn = &prog->insnsi[i];
        u32 *descp = &ctx->descriptors[i];
        int ret;

        access_reg(ctx, insn->src_reg);
        access_reg(ctx, insn->dst_reg);

        ctx->bpf_index = i;
        if (ctx->target == NULL) {
            ctx->changes += INDEX(*descp) != ctx->jit_index;
            *descp &= JIT_DESC_CONVERT;
            *descp |= ctx->jit_index;
        }

        ret = build_insn(insn, ctx);
        if (ret < 0)
            return ret;

        if (ret > 0) {
            i++;
            if (ctx->target == NULL)
                descp[1] = ctx->jit_index;
        }
    }

    /* Store the end offset, where the epilogue begins */
    ctx->descriptors[prog->len] = ctx->jit_index;
    return 0;
}

/* Set the branch conversion flag on all instructions */
static void set_convert_flag(struct jit_context *ctx, bool enable)
{
    const struct bpf_prog *prog = ctx->program;
    u32 flag = enable ? JIT_DESC_CONVERT : 0;
    unsigned int i;

    for (i = 0; i <= prog->len; i++)
        ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag;
}

static void jit_fill_hole(void *area, unsigned int size)
{
    u32 *p;

    /* We are guaranteed to have aligned memory. */
    for (p = area; size >= sizeof(u32); size -= sizeof(u32))
        uasm_i_break(&p, BRK_BUG); /* Increments p */
}

bool bpf_jit_needs_zext(void)
{
    return true;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
    struct bpf_prog *tmp, *orig_prog = prog;
    struct bpf_binary_header *header = NULL;
    struct jit_context ctx;
    bool tmp_blinded = false;
    unsigned int tmp_idx;
    unsigned int image_size;
    u8 *image_ptr;
    int tries;

    /*
     * If the BPF JIT was not enabled, we must fall back to
     * the interpreter.
     */
    if (!prog->jit_requested)
        return orig_prog;
    /*
     * If constant blinding was enabled and we failed during blinding,
     * we must fall back to the interpreter. Otherwise, we save
     * the new JITed code.
     */
    tmp = bpf_jit_blind_constants(prog);
    if (IS_ERR(tmp))
        return orig_prog;
    if (tmp != prog) {
        tmp_blinded = true;
        prog = tmp;
    }

    memset(&ctx, 0, sizeof(ctx));
    ctx.program = prog;

    /*
     * If we cannot allocate memory for descriptors[], we must
     * fall back to the interpreter.
     */
    ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors),
                  GFP_KERNEL);
    if (ctx.descriptors == NULL)
        goto out_err;

    /* First pass discovers used resources */
    if (build_body(&ctx) < 0)
        goto out_err;
    /*
     * Second pass computes instruction offsets.
     * If any PC-relative branches are out of range, a sequence of
     * a PC-relative branch + a jump is generated, and we have to
     * try again from the beginning to generate the new offsets.
     * This is done until no additional conversions are necessary.
     * The last two iterations are done with all branches being
     * converted, to guarantee offset table convergence within a
     * fixed number of iterations.
     */
    ctx.jit_index = 0;
    build_prologue(&ctx);
    tmp_idx = ctx.jit_index;

    tries = JIT_MAX_ITERATIONS;
    do {
        ctx.jit_index = tmp_idx;
        ctx.changes = 0;
        if (tries == 2)
            set_convert_flag(&ctx, true);
        if (build_body(&ctx) < 0)
            goto out_err;
    } while (ctx.changes > 0 && --tries > 0);

    if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge"))
        goto out_err;

    build_epilogue(&ctx, MIPS_R_RA);

    /* Now we know the size of the image to allocate */
    image_size = sizeof(u32) * ctx.jit_index;
    header = bpf_jit_binary_alloc(image_size, &image_ptr,
                      sizeof(u32), jit_fill_hole);
    /*
     * If we cannot allocate memory for the image, we must
     * fall back to the interpreter.
     */
    if (header == NULL)
        goto out_err;

    /* Actual pass to generate final JIT code */
    ctx.target = (u32 *)image_ptr;
    ctx.jit_index = 0;

    /*
     * If building the JITed code fails somehow,
     * we fall back to the interpreter.
     */
    build_prologue(&ctx);
    if (build_body(&ctx) < 0)
        goto out_err;
    build_epilogue(&ctx, MIPS_R_RA);

    /* Populate line info meta data */
    set_convert_flag(&ctx, false);
    bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);

    /* Set as read-only exec and flush instruction cache */
    bpf_jit_binary_lock_ro(header);
    flush_icache_range((unsigned long)header,
               (unsigned long)&ctx.target[ctx.jit_index]);

    if (bpf_jit_enable > 1)
        bpf_jit_dump(prog->len, image_size, 2, ctx.target);

    prog->bpf_func = (void *)ctx.target;
    prog->jited = 1;
    prog->jited_len = image_size;

out:
    if (tmp_blinded)
        bpf_jit_prog_release_other(prog, prog == orig_prog ?
                       tmp : orig_prog);
    kfree(ctx.descriptors);
    return prog;

out_err:
    prog = orig_prog;
    if (header)
        bpf_jit_binary_free(header);
    goto out;
}