Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * bpf_jit.h: BPF JIT compiler for PPC
0004  *
0005  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
0006  *       2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
0007  */
0008 #ifndef _BPF_JIT_H
0009 #define _BPF_JIT_H
0010 
0011 #ifndef __ASSEMBLY__
0012 
0013 #include <asm/types.h>
0014 #include <asm/ppc-opcode.h>
0015 
/*
 * On ELF ABI v1 (big-endian ppc64), a function pointer points to a
 * function descriptor (entry address, TOC pointer, environment pointer)
 * rather than directly at code, so reserve room for one in front of the
 * JITed image.  Other ABIs jump straight to the code.
 */
#ifdef CONFIG_PPC64_ELF_ABI_V1
#define FUNCTION_DESCR_SIZE 24
#else
#define FUNCTION_DESCR_SIZE 0
#endif
0021 
/*
 * Emit one 32-bit instruction into the image.  During sizing passes the
 * image pointer 'd' is NULL, so nothing is stored but 'idx' still
 * advances -- both passes therefore agree on every instruction offset.
 * EMIT() is shorthand bound to the 'image' and 'ctx' variables that the
 * JIT code-gen functions have in scope.
 */
#define PLANT_INSTR(d, idx, instr)                        \
    do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)     PLANT_INSTR(image, ctx->idx, instr)
0025 
/*
 * Long jump; (unconditional 'branch').
 *
 * 'dest' is an offset from the start of the JIT image; the displacement
 * is taken relative to the current emit position (ctx->idx * 4).
 * NOTE: on an out-of-range target this macro does 'return -ERANGE'
 * straight out of the *enclosing* function, so it may only be used in
 * functions returning int.  dest == 0 is tolerated (unresolved target
 * on an early pass).
 */
#define PPC_JMP(dest)                                 \
    do {                                      \
        long offset = (long)(dest) - (ctx->idx * 4);              \
        if ((dest) != 0 && !is_offset_in_branch_range(offset)) {              \
            pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);           \
            return -ERANGE;                       \
        }                                 \
        EMIT(PPC_RAW_BRANCH(offset));                     \
    } while (0)
0036 
/*
 * bl (unconditional 'branch' with link).  Unlike PPC_JMP, 'dest' here is
 * an absolute address: the displacement is computed against the actual
 * address of the current instruction (image + ctx->idx).  No range check
 * is performed here.
 */
#define PPC_BL(dest)    EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
0039 
/*
 * "cond" here covers BO:BI fields.
 *
 * Emits a single conditional branch with a 16-bit displacement (masked
 * to 0xfffc -- displacements are word-aligned).  Like PPC_JMP, an
 * out-of-range target returns -ERANGE from the enclosing function;
 * dest == 0 (not yet resolved) is allowed through.
 */
#define PPC_BCC_SHORT(cond, dest)                         \
    do {                                      \
        long offset = (long)(dest) - (ctx->idx * 4);              \
        if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) {             \
            pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);       \
            return -ERANGE;                       \
        }                                 \
        EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));                  \
    } while (0)
0050 
/*
 * Sign-extended 32-bit immediate load.
 *
 * One instruction ('li') when the value fits in a signed 16-bit
 * immediate; otherwise 'lis' for the high half plus an 'ori' for the
 * low half -- the 'ori' is skipped when the low 16 bits are zero.
 */
#define PPC_LI32(d, i)      do {                          \
        if ((int)(uintptr_t)(i) >= -32768 &&                  \
                (int)(uintptr_t)(i) < 32768)              \
            EMIT(PPC_RAW_LI(d, i));                   \
        else {                                \
            EMIT(PPC_RAW_LIS(d, IMM_H(i)));               \
            if (IMM_L(i))                         \
                EMIT(PPC_RAW_ORI(d, d, IMM_L(i)));        \
        } } while(0)
0061 
#ifdef CONFIG_PPC64
/*
 * 64-bit immediate load, up to five instructions.
 *
 * Values representable as sign-extended 32-bit use PPC_LI32 directly.
 * Otherwise the upper 32 bits are materialized first (a single 'li'
 * suffices when the top 17 bits are clear, since the loaded halfword is
 * then non-negative and sign extension is harmless; else 'lis' + an
 * optional 'ori'), the register is shifted left 32, and the lower 32
 * bits are OR-ed in piecewise, skipping any zero halfword.
 */
#define PPC_LI64(d, i)      do {                          \
        if ((long)(i) >= -2147483648 &&                   \
                (long)(i) < 2147483648)               \
            PPC_LI32(d, i);                       \
        else {                                \
            if (!((uintptr_t)(i) & 0xffff800000000000ULL))        \
                EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) &   \
                        0xffff));             \
            else {                            \
                EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
                if ((uintptr_t)(i) & 0x0000ffff00000000ULL)   \
                    EMIT(PPC_RAW_ORI(d, d,            \
                      ((uintptr_t)(i) >> 32) & 0xffff));  \
            }                             \
            EMIT(PPC_RAW_SLDI(d, d, 32));                 \
            if ((uintptr_t)(i) & 0x00000000ffff0000ULL)       \
                EMIT(PPC_RAW_ORIS(d, d,               \
                     ((uintptr_t)(i) >> 16) & 0xffff));   \
            if ((uintptr_t)(i) & 0x000000000000ffffULL)       \
                EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) &       \
                            0xffff));             \
        } } while (0)
#endif
0086 
/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP.  If code size differs
 * with different branch reaches we will have the issue of code moving from
 * one pass to the next and will need a few passes to converge on a stable
 * state.
 *
 * Near targets: one conditional branch + NOP (always two words).
 * Far targets: an inverted conditional branch (BO[3] flipped via
 * COND_CMP_TRUE) that skips over an unconditional long jump -- also
 * two words, so every pass emits the same amount of code.
 */
#define PPC_BCC(cond, dest) do {                          \
        if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {    \
            PPC_BCC_SHORT(cond, dest);                \
            EMIT(PPC_RAW_NOP());                      \
        } else {                              \
            /* Flip the 'T or F' bit to invert comparison */      \
            PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
            PPC_JMP(dest);                        \
        } } while(0)
0103 
/* To create a branch condition, select a bit of cr0... */
#define CR0_LT      0
#define CR0_GT      1
#define CR0_EQ      2
/* ...and modify BO[3]: branch if the CR bit is true, or if it is false */
#define COND_CMP_TRUE   0x100
#define COND_CMP_FALSE  0x000
/*
 * Together, they make all required comparisons.  Note the complements:
 * GE is "not LT", NE is "not EQ", LE is "not GT".
 */
#define COND_GT     (CR0_GT | COND_CMP_TRUE)
#define COND_GE     (CR0_LT | COND_CMP_FALSE)
#define COND_EQ     (CR0_EQ | COND_CMP_TRUE)
#define COND_NE     (CR0_EQ | COND_CMP_FALSE)
#define COND_LT     (CR0_LT | COND_CMP_TRUE)
#define COND_LE     (CR0_GT | COND_CMP_FALSE)

/* High bits of ctx->seen, disjoint from the per-register bits below */
#define SEEN_FUNC   0x20000000 /* might call external helpers */
#define SEEN_TAILCALL   0x40000000 /* uses tail calls */
0121 
/* Per-program state carried through every JIT pass. */
struct codegen_context {
    /*
     * This is used to track register usage as well
     * as calls to external helpers.
     * - register usage is tracked with corresponding
     *   bits (r3-r31)
     * - rest of the bits can be used to track other
     *   things -- for now, we use bits 0 to 2
     *   encoded in SEEN_* macros above
     */
    unsigned int seen;
    /* Index (in 32-bit words) of the next instruction to emit */
    unsigned int idx;
    /* Size of the JITed frame's stack area, in bytes */
    unsigned int stack_size;
    /* BPF-to-ppc register mapping; accessed via bpf_to_ppc() */
    int b2p[MAX_BPF_JIT_REG + 2];
    /* NOTE(review): presumably next free exception-table slot -- confirm in bpf_add_extable_entry() */
    unsigned int exentry_idx;
    /* NOTE(review): appears to hold an alternate exit address for generated code -- confirm against users */
    unsigned int alt_exit_addr;
};
0139 
/*
 * Map a BPF register number to its assigned ppc register.
 * NOTE: relies on a 'ctx' variable being in scope at the expansion site.
 */
#define bpf_to_ppc(r)   (ctx->b2p[r])

/* Instructions reserved for each exception-table fixup stub */
#ifdef CONFIG_PPC32
#define BPF_FIXUP_LEN   3 /* Three instructions => 12 bytes */
#else
#define BPF_FIXUP_LEN   2 /* Two instructions => 8 bytes */
#endif
0147 
/* Make freshly written JIT instructions visible to instruction fetch. */
static inline void bpf_flush_icache(void *start, void *end)
{
    unsigned long first = (unsigned long)start;
    unsigned long last = (unsigned long)end;

    /* Order the image stores before the icache flush (smp write barrier). */
    smp_wmb();
    flush_icache_range(first, last);
}
0153 
0154 static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
0155 {
0156     return ctx->seen & (1 << (31 - i));
0157 }
0158 
0159 static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
0160 {
0161     ctx->seen |= 1 << (31 - i);
0162 }
0163 
0164 static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
0165 {
0166     ctx->seen &= ~(1 << (31 - i));
0167 }
0168 
/*
 * Entry points implemented elsewhere (per-subarch JIT code-gen files).
 * The int-returning functions follow the macros above: 0 on success,
 * negative errno (e.g. -ERANGE from the branch macros) on failure.
 */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
               u32 *addrs, int pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);

int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
              int insn_idx, int jmp_off, int dst_reg);
0180 
0181 #endif
0182 
0183 #endif