{
    "BPF_ATOMIC_AND without fetch",
    .insns = {
        /* val = 0x110; */
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
        /* atomic_and(&val, 0x011); */
        BPF_MOV64_IMM(BPF_REG_1, 0x011),
        BPF_ATOMIC_OP(BPF_DW, BPF_AND, BPF_REG_10, BPF_REG_1, -8),
        /* if (val != 0x010) exit(2); */
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x010, 2),
        BPF_MOV64_IMM(BPF_REG_0, 2),
        BPF_EXIT_INSN(),
        /* r1 should not be clobbered, no BPF_FETCH flag */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
},
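/* The entry above exercises BPF_ATOMIC | BPF_AND without the BPF_FETCH
 * flag: the doubleword at fp-8 is AND-ed in place and the source register
 * R1 must still hold its operand (0x011) afterwards. Roughly, the memory
 * semantics being checked correspond to this C11 sketch (illustration
 * only, not part of the test program):
 *
 *   #include <stdatomic.h>
 *   #include <assert.h>
 *
 *   _Atomic long val = 0x110;
 *   atomic_fetch_and(&val, 0x011);   // result ignored: plain atomic AND
 *   assert(val == 0x010);            // 0x110 & 0x011 == 0x010
 */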
{
    "BPF_ATOMIC_AND with fetch",
    .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 123),
        /* val = 0x110; */
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
        /* old = atomic_fetch_and(&val, 0x011); */
        BPF_MOV64_IMM(BPF_REG_1, 0x011),
        BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
        /* if (old != 0x110) exit(3); */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
        BPF_MOV64_IMM(BPF_REG_0, 3),
        BPF_EXIT_INSN(),
        /* if (val != 0x010) exit(2); */
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
        BPF_MOV64_IMM(BPF_REG_1, 2),
        BPF_EXIT_INSN(),
        /* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_EXIT_INSN(),
        /* exit(0); */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
},
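/* With BPF_FETCH set, the source register R1 receives the old memory
 * value (0x110) while fp-8 becomes 0x010. R0 is preloaded with 123 and
 * re-checked at the end, presumably because the x86-64 JIT maps R0 to
 * RAX, which a cmpxchg-based lowering of fetch-AND would also need, so a
 * buggy JIT could clobber it. A C11 sketch of the data flow
 * (illustration only):
 *
 *   _Atomic long val = 0x110;
 *   long old = atomic_fetch_and(&val, 0x011);
 *   assert(old == 0x110 && val == 0x010);
 */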
{
    "BPF_ATOMIC_AND with fetch 32bit",
    .insns = {
        /* r0 = (s64) -1 */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
        /* val = 0x110; */
        BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
        /* old = atomic_fetch_and(&val, 0x011); */
        BPF_MOV32_IMM(BPF_REG_1, 0x011),
        BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
        /* if (old != 0x110) exit(3); */
        BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
        BPF_MOV32_IMM(BPF_REG_0, 3),
        BPF_EXIT_INSN(),
        /* if (val != 0x010) exit(2); */
        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
        BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
        BPF_MOV32_IMM(BPF_REG_1, 2),
        BPF_EXIT_INSN(),
        /* Check R0 wasn't clobbered (for fear of x86 JIT bug)
         * It should be -1 so add 1 to get exit code.
         */
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
        BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
},
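/* The 32-bit variant repeats the same check with BPF_W on a word at
 * fp-4, using BPF_MOV32/BPF_JMP32 so only the low 32 bits are set and
 * compared. R0 is set to -1 up front; if the JIT corrupts it during the
 * 32-bit atomic, the final "add 1" no longer produces exit code 0. A
 * 32-bit C11 sketch of the semantics (illustration only):
 *
 *   _Atomic unsigned int val = 0x110;
 *   unsigned int old = atomic_fetch_and(&val, 0x011u);
 *   assert(old == 0x110 && val == 0x010);
 */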
{
    "BPF_ATOMIC_AND with fetch - r0 as source reg",
    .insns = {
        /* val = 0x110; */
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
        /* old = atomic_fetch_and(&val, 0x011); */
        BPF_MOV64_IMM(BPF_REG_0, 0x011),
        BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
        /* if (old != 0x110) exit(3); */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2),
        BPF_MOV64_IMM(BPF_REG_0, 3),
        BPF_EXIT_INSN(),
        /* if (val != 0x010) exit(2); */
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
        BPF_MOV64_IMM(BPF_REG_1, 2),
        BPF_EXIT_INSN(),
        /* exit(0); */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
},
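/* The last entry uses R0 itself as the fetch register, the case most
 * likely to trip a JIT that internally reserves R0/RAX while emulating
 * fetch-AND. For reference, a BPF C program would normally reach this
 * kind of instruction through the __sync builtins; assuming a recent
 * clang (>= 12, -target bpf -mcpu=v3), code along these lines is
 * expected to emit BPF_ATOMIC | BPF_AND | BPF_FETCH when the result is
 * used (a sketch under those toolchain assumptions, not verified here):
 *
 *   unsigned long long val = 0x110;
 *   unsigned long long old = __sync_fetch_and_and(&val, 0x011);
 *   // old == 0x110, val == 0x010
 */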