{
    "check valid spill/fill",
    .insns = {
    /* spill R1(ctx) into stack */
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
    /* fill it back into R2 */
    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
    /* should be able to access R0 = *(R2 + 8) */
    /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    BPF_EXIT_INSN(),
    },
    .errstr_unpriv = "R0 leaks addr",
    .result = ACCEPT,
    .result_unpriv = REJECT,
    .retval = POINTER_VALUE,
},
{
    "check valid spill/fill, skb mark",
    .insns = {
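    /* copy the ctx (skb) pointer into r6, spill it to the stack,
     * fill it back into r0, then read skb->mark through the
     * filled pointer
     */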
    BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
            offsetof(struct __sk_buff, mark)),
    BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
    .result_unpriv = ACCEPT,
},
{
    "check valid spill/fill, ptr to mem",
    .insns = {
    /* reserve 8 byte ringbuf memory */
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    BPF_LD_MAP_FD(BPF_REG_1, 0),
    BPF_MOV64_IMM(BPF_REG_2, 8),
    BPF_MOV64_IMM(BPF_REG_3, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
    /* store a pointer to the reserved memory in R6 */
    BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
    /* check whether the reservation was successful */
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
    /* spill R6(mem) into the stack */
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
    /* fill it back in R7 */
    BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
    /* should be able to access *(R7) = 0 */
    BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
    /* submit the reserved ringbuf memory */
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
    BPF_MOV64_IMM(BPF_REG_2, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .fixup_map_ringbuf = { 1 },
    .result = ACCEPT,
    .result_unpriv = ACCEPT,
},
{
    "check with invalid reg offset 0",
    .insns = {
    /* reserve 8 byte ringbuf memory */
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    BPF_LD_MAP_FD(BPF_REG_1, 0),
    BPF_MOV64_IMM(BPF_REG_2, 8),
    BPF_MOV64_IMM(BPF_REG_3, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
    /* store a pointer to the reserved memory in R6 */
    BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
    /* add invalid offset to memory or NULL */
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
    /* check whether the reservation was successful */
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
    /* should not be able to access *(R6) = 0 */
    BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0),
    /* submit the reserved ringbuf memory */
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    BPF_MOV64_IMM(BPF_REG_2, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .fixup_map_ringbuf = { 1 },
    .result = REJECT,
    .errstr = "R0 pointer arithmetic on alloc_mem_or_null prohibited",
},
{
    "check corrupted spill/fill",
    .insns = {
    /* spill R1(ctx) into stack */
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
    /* corrupt the spilled R1 pointer on the stack */
    BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
    /* fill back into R0 is fine for priv.
     * R0 now becomes SCALAR_VALUE.
     */
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
    /* Load from R0 should fail. */
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
    BPF_EXIT_INSN(),
    },
    .errstr_unpriv = "attempt to corrupt spilled",
    .errstr = "R0 invalid mem access 'scalar'",
    .result = REJECT,
    .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
    "check corrupted spill/fill, LSB",
    .insns = {
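    /* spill the ctx pointer, overwrite its two low bytes on the stack
     * with a 16-bit store, then fill it back; unprivileged must be
     * rejected for attempting to corrupt a spilled pointer
     */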
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
    BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
    BPF_EXIT_INSN(),
    },
    .errstr_unpriv = "attempt to corrupt spilled",
    .result_unpriv = REJECT,
    .result = ACCEPT,
    .retval = POINTER_VALUE,
},
{
    "check corrupted spill/fill, MSB",
    .insns = {
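    /* spill the ctx pointer, overwrite its upper four bytes on the
     * stack with a 32-bit store at fp-4, then fill it back;
     * unprivileged must be rejected for corrupting a spilled pointer
     */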
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
    BPF_EXIT_INSN(),
    },
    .errstr_unpriv = "attempt to corrupt spilled",
    .result_unpriv = REJECT,
    .result = ACCEPT,
    .retval = POINTER_VALUE,
},
{
    "Spill and refill a u32 const scalar.  Offset to skb->data",
    .insns = {
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
    /* r4 = 20 */
    BPF_MOV32_IMM(BPF_REG_4, 20),
    /* *(u32 *)(r10 -8) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* r4 = *(u32 *)(r10 -8) */
    BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
    /* r0 = r2 */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
    /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
    BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
    /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill a u32 const, refill from another half of the uninit u32 from the stack",
    .insns = {
    /* r4 = 20 */
    BPF_MOV32_IMM(BPF_REG_4, 20),
    /* *(u32 *)(r10 -8) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
    BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
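    /* fp-4 through fp-1 were never written, so the read above is from
     * uninitialized stack and must be rejected
     */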
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = REJECT,
    .errstr = "invalid read from stack off -4+0 size 4",
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
    .insns = {
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
    /* r4 = 20 */
    BPF_MOV32_IMM(BPF_REG_4, 20),
    /* *(u32 *)(r10 -8) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* r4 = *(u16 *)(r10 -8) */
    BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
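    /* the narrower 16-bit fill does not preserve the spilled constant;
     * r4 is only known to be in [0, 65535]
     */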
    /* r0 = r2 */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
    /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
    BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
    /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = REJECT,
    .errstr = "invalid access to packet",
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill u32 const scalars.  Refill as u64.  Offset to skb->data",
    .insns = {
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
    /* r6 = 0 */
    BPF_MOV32_IMM(BPF_REG_6, 0),
    /* r7 = 20 */
    BPF_MOV32_IMM(BPF_REG_7, 20),
    /* *(u32 *)(r10 -4) = r6 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
    /* *(u32 *)(r10 -8) = r7 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
    /* r4 = *(u64 *)(r10 -8) */
    BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
    /* r0 = r2 */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
    /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
    BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
    /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = REJECT,
    .errstr = "invalid access to packet",
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
    .insns = {
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
    /* r4 = 20 */
    BPF_MOV32_IMM(BPF_REG_4, 20),
    /* *(u32 *)(r10 -8) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* r4 = *(u16 *)(r10 -6) */
    BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
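    /* the fill at fp-6 does not line up with the start of the 32-bit
     * spill at fp-8, so the constant is not preserved; r4 only has
     * umax=65535
     */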
    /* r0 = r2 */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
    /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
    BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
    /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = REJECT,
    .errstr = "invalid access to packet",
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill and refill a u32 const scalar at non 8byte aligned stack addr.  Offset to skb->data",
    .insns = {
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
    /* r4 = 20 */
    BPF_MOV32_IMM(BPF_REG_4, 20),
    /* *(u32 *)(r10 -8) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* *(u32 *)(r10 -4) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
    /* r4 = *(u32 *)(r10 -4) */
    BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
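    /* the spill at fp-4 is not at an 8-byte aligned stack slot, so the
     * fill does not preserve the constant; r4 only has umax=U32_MAX
     */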
    /* r0 = r2 */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
    /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
    BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
    /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = REJECT,
    .errstr = "invalid access to packet",
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill and refill a umax=40 bounded scalar.  Offset to skb->data",
    .insns = {
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
            offsetof(struct __sk_buff, data)),
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
            offsetof(struct __sk_buff, data_end)),
    BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
            offsetof(struct __sk_buff, tstamp)),
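    /* r4 = skb->tstamp; exit unless r4 <= 40, so the spilled r4 below
     * is bounded to umax=40
     */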
    BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    /* *(u32 *)(r10 -8) = r4 R4=umax=40 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* r4 = *(u32 *)(r10 - 8) */
    BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
    /* r2 += r4 R2=pkt R4=umax=40 */
    BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
    /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
    BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
    /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
    BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
    /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
    "Spill a u32 scalar at fp-4 and then at fp-8",
    .insns = {
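    /* spill the same u32 at fp-4 and fp-8, then do a 64-bit fill from
     * fp-8 that spans both 32-bit spill slots
     */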
    /* r4 = 4321 */
    BPF_MOV32_IMM(BPF_REG_4, 4321),
    /* *(u32 *)(r10 -4) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
    /* *(u32 *)(r10 -8) = r4 */
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
    /* r4 = *(u64 *)(r10 -8) */
    BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
    },
    .result = ACCEPT,
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
},