Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
0002 /* eBPF instruction mini library */
0003 #ifndef __BPF_INSN_H
0004 #define __BPF_INSN_H
0005 
0006 struct bpf_insn;
0007 
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

/* 64-bit register-to-register ALU op: dst_reg = dst_reg 'OP' src_reg */
#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* 32-bit variant of BPF_ALU64_REG (BPF_ALU class) */
#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

/* 64-bit immediate ALU op: dst_reg = dst_reg 'OP' imm32 */
#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* 32-bit variant of BPF_ALU64_IMM (BPF_ALU class) */
#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

/* 64-bit register move */
#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* 32-bit register move (BPF_ALU class) */
#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

/* 64-bit immediate move (imm32 is sign-extended by the runtime) */
#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* 32-bit immediate move (BPF_ALU class) */
#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

/*
 * Expands to a comma-separated pair of insns (intended for use inside
 * an insn-array initializer): the first carries the low 32 bits of IMM,
 * the second the high 32 bits.  Note IMM is evaluated twice, so it must
 * be free of side effects.
 */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* Fallback for older uapi headers that don't define the pseudo src_reg. */
#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

/* Legacy packet load: implicit src/dst, result lands in R0 */
#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

/* The atomic operation itself is selected by OP, carried in imm. */
#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

/* Escape hatch: build an insn from explicit field values. */
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

0223 /* Program exit */
0224 
0225 #define BPF_EXIT_INSN()                     \
0226     ((struct bpf_insn) {                    \
0227         .code  = BPF_JMP | BPF_EXIT,            \
0228         .dst_reg = 0,                   \
0229         .src_reg = 0,                   \
0230         .off   = 0,                 \
0231         .imm   = 0 })
0232 
0233 #endif