0001 {
0002 "check valid spill/fill",
0003 .insns = {
0004 /* spill R1 (the ctx pointer at program entry) into stack slot fp-8 */
0005 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
0006 /* fill it back into R2; the verifier must preserve the pointer type */
0007 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
0008 /* returning the refilled pointer is fine for priv, but leaks a
0009  * kernel address to unpriv -> REJECT with "R0 leaks addr" there */
0010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0011 BPF_EXIT_INSN(),
0012 },
0013 .errstr_unpriv = "R0 leaks addr",
0014 .result = ACCEPT,
0015 .result_unpriv = REJECT,
0016 .retval = POINTER_VALUE,
0017 },
0018 {
0019 "check valid spill/fill, skb mark",
0020 .insns = {
/* r6 = r1 (ctx, i.e. the skb pointer) */
0021 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
/* spill the ctx pointer to fp-8 and fill it back into r0 */
0022 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
0023 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
/* the refilled value must still be usable as a ctx pointer:
 * load skb->mark through it */
0024 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
0025 offsetof(struct __sk_buff, mark)),
0026 BPF_EXIT_INSN(),
0027 },
0028 .result = ACCEPT,
0029 .result_unpriv = ACCEPT,
0030 },
0031 {
0032 "check valid spill/fill, ptr to mem",
0033 .insns = {
0034 /* reserve 8 bytes of ringbuf memory */
0035 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
0036 BPF_LD_MAP_FD(BPF_REG_1, 0),
0037 BPF_MOV64_IMM(BPF_REG_2, 8),
0038 BPF_MOV64_IMM(BPF_REG_3, 0),
0039 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
0040 /* store a pointer to the reserved memory in R6 */
0041 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
0042 /* check whether the reservation was successful (NULL check) */
0043 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
0044 /* spill R6 (ptr to mem) into stack slot fp-8 */
0045 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
0046 /* fill it back into R7 */
0047 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
0048 /* the refilled pointer must still allow the write *(R7) = 0 */
0049 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
0050 /* submit the reserved ringbuf memory via the refilled pointer */
0051 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
0052 BPF_MOV64_IMM(BPF_REG_2, 0),
0053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
0054 BPF_MOV64_IMM(BPF_REG_0, 0),
0055 BPF_EXIT_INSN(),
0056 },
0057 .fixup_map_ringbuf = { 1 },
0058 .result = ACCEPT,
0059 .result_unpriv = ACCEPT,
0060 },
0061 {
0062 "check with invalid reg offset 0",
0063 .insns = {
0064 /* reserve 8 bytes of ringbuf memory */
0065 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
0066 BPF_LD_MAP_FD(BPF_REG_1, 0),
0067 BPF_MOV64_IMM(BPF_REG_2, 8),
0068 BPF_MOV64_IMM(BPF_REG_3, 0),
0069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
0070 /* store a pointer to the reserved memory in R6 */
0071 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
0072 /* pointer arithmetic on R0 before the NULL check: R0 is still
0073  * alloc_mem_or_null here, so the verifier must reject this ADD */
0074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
0075 /* (never reached by the verifier) NULL check */
0076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
0077 /* write through R6 and submit, as in the valid variant above */
0078 BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0),
0079 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0080 BPF_MOV64_IMM(BPF_REG_2, 0),
0081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
0082 BPF_MOV64_IMM(BPF_REG_0, 0),
0083 BPF_EXIT_INSN(),
0084 },
0085 .fixup_map_ringbuf = { 1 },
0086 .result = REJECT,
0087 .errstr = "R0 pointer arithmetic on alloc_mem_or_null prohibited",
0088 },
0089 {
0090 "check corrupted spill/fill",
0091 .insns = {
0092 /* spill R1(ctx) into stack slot fp-8 */
0093 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
0094 /* corrupt one byte of the spilled pointer */
0095 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
0096 /* for unpriv this corruption itself is rejected
0097  * ("attempt to corrupt spilled"); for priv the fill succeeds
0098  * but R0 becomes a plain scalar, not a pointer */
0099 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
0100 /* dereferencing a scalar is an invalid mem access -> REJECT */
0101 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
0102 BPF_EXIT_INSN(),
0103 },
0104 .errstr_unpriv = "attempt to corrupt spilled",
0105 .errstr = "R0 invalid mem access 'scalar'",
0106 .result = REJECT,
0107 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
0108 },
0109 {
0110 "check corrupted spill/fill, LSB",
0111 .insns = {
/* spill R1(ctx) into stack slot fp-8 */
0112 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* overwrite the 2 bytes at the start of the spill slot */
0113 BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
/* unpriv: rejected as corruption of a spilled pointer;
 * priv: fill succeeds and the value is returned */
0114 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
0115 BPF_EXIT_INSN(),
0116 },
0117 .errstr_unpriv = "attempt to corrupt spilled",
0118 .result_unpriv = REJECT,
0119 .result = ACCEPT,
0120 .retval = POINTER_VALUE,
0121 },
0122 {
0123 "check corrupted spill/fill, MSB",
0124 .insns = {
/* spill R1(ctx) into stack slot fp-8 */
0125 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* overwrite the upper 4 bytes of the spill slot (fp-4..fp-1) */
0126 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
/* unpriv: rejected as corruption of a spilled pointer;
 * priv: fill succeeds and the value is returned */
0127 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
0128 BPF_EXIT_INSN(),
0129 },
0130 .errstr_unpriv = "attempt to corrupt spilled",
0131 .result_unpriv = REJECT,
0132 .result = ACCEPT,
0133 .retval = POINTER_VALUE,
0134 },
0135 {
0136 "Spill and refill a u32 const scalar. Offset to skb->data",
0137 .insns = {
0138 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
0139 offsetof(struct __sk_buff, data)),
0140 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
0141 offsetof(struct __sk_buff, data_end)),
0142 /* r4 = 20 */
0143 BPF_MOV32_IMM(BPF_REG_4, 20),
0144 /* *(u32 *)(r10 -8) = r4 */
0145 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0146 /* r4 = *(u32 *)(r10 -8); the constant 20 must survive the spill */
0147 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
0148 /* r0 = r2 (pkt) */
0149 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0150 /* r0 += r4; R0=pkt R2=pkt R3=pkt_end R4=20 */
0151 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
0152 /* if (r0 > r3) goto skip */
0153 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
0154 /* r0 = *(u32 *)r2; within the 20-byte range proven above */
0155 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
0156 BPF_MOV64_IMM(BPF_REG_0, 0),
0157 BPF_EXIT_INSN(),
0158 },
0159 .result = ACCEPT,
0160 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0161 },
0162 {
0163 "Spill a u32 const, refill from another half of the uninit u32 from the stack",
0164 .insns = {
0165 /* r4 = 20 */
0166 BPF_MOV32_IMM(BPF_REG_4, 20),
0167 /* *(u32 *)(r10 -8) = r4 */
0168 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0169 /* r4 = *(u32 *)(r10 -4); fp-4..fp-1 was never written */
0170 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
0171 BPF_MOV64_IMM(BPF_REG_0, 0),
0172 BPF_EXIT_INSN(),
0173 },
0174 .result = REJECT,
0175 .errstr = "invalid read from stack off -4+0 size 4",
0176 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0177 },
0178 {
0179 "Spill a u32 const scalar. Refill as u16. Offset to skb->data",
0180 .insns = {
0181 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
0182 offsetof(struct __sk_buff, data)),
0183 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
0184 offsetof(struct __sk_buff, data_end)),
0185 /* r4 = 20 */
0186 BPF_MOV32_IMM(BPF_REG_4, 20),
0187 /* *(u32 *)(r10 -8) = r4 */
0188 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0189 /* r4 = *(u16 *)(r10 -8): partial refill of the u32 spill slot */
0190 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
0191 /* r0 = r2 (pkt) */
0192 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0193 /* r0 += r4; r4's spilled bounds were lost by the partial refill */
0194 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
0195 /* if (r0 > r3) goto skip */
0196 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
0197 /* packet access with unproven range -> REJECT */
0198 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
0199 BPF_MOV64_IMM(BPF_REG_0, 0),
0200 BPF_EXIT_INSN(),
0201 },
0202 .result = REJECT,
0203 .errstr = "invalid access to packet",
0204 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0205 },
0206 {
0207 "Spill u32 const scalars. Refill as u64. Offset to skb->data",
0208 .insns = {
0209 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
0210 offsetof(struct __sk_buff, data)),
0211 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
0212 offsetof(struct __sk_buff, data_end)),
0213 /* r6 = 0 */
0214 BPF_MOV32_IMM(BPF_REG_6, 0),
0215 /* r7 = 20 */
0216 BPF_MOV32_IMM(BPF_REG_7, 20),
0217 /* *(u32 *)(r10 -4) = r6 */
0218 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
0219 /* *(u32 *)(r10 -8) = r7 */
0220 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
0221 /* refill across the two u32 spills; NOTE(review): the test name
0222  * says "as u64" but this uses BPF_H (2 bytes) -- either way the
0223  * spilled constant bounds are lost; confirm against upstream
0224  * whether BPF_DW was intended */
0225 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
0226 /* r0 = r2 (pkt); r0 += r4 with r4's range unknown */
0227 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0228 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
0229 /* if (r0 > r3) goto skip */
0230 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
0231 /* packet access with unproven range -> REJECT */
0232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
0233 BPF_MOV64_IMM(BPF_REG_0, 0),
0234 BPF_EXIT_INSN(),
0235 },
0236 .result = REJECT,
0237 .errstr = "invalid access to packet",
0238 {
0239 "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
0240 .insns = {
0241 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
0242 offsetof(struct __sk_buff, data)),
0243 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
0244 offsetof(struct __sk_buff, data_end)),
0245 /* r4 = 20 */
0246 BPF_MOV32_IMM(BPF_REG_4, 20),
0247 /* *(u32 *)(r10 -8) = r4 */
0248 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0249 /* r4 = *(u16 *)(r10 -6): read from the middle of the spill slot */
0250 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
0251 /* r0 = r2 (pkt) */
0252 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0253 /* r0 += r4; the misaligned refill lost the spilled bounds */
0254 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
0255 /* if (r0 > r3) goto skip */
0256 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
0257 /* packet access with unproven range -> REJECT */
0258 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
0259 BPF_MOV64_IMM(BPF_REG_0, 0),
0260 BPF_EXIT_INSN(),
0261 },
0262 .result = REJECT,
0263 .errstr = "invalid access to packet",
0264 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0265 },
0266 {
0267 "Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
0268 .insns = {
0269 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
0270 offsetof(struct __sk_buff, data)),
0271 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
0272 offsetof(struct __sk_buff, data_end)),
0273 /* r4 = 20 */
0274 BPF_MOV32_IMM(BPF_REG_4, 20),
0275 /* *(u32 *)(r10 -8) = r4 */
0276 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0277 /* *(u32 *)(r10 -4) = r4: spill at a non-8-byte-aligned slot */
0278 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
0279 /* r4 = *(u32 *)(r10 -4): refill from the unaligned slot */
0280 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
0281 /* r0 = r2 (pkt) */
0282 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0283 /* r0 += r4; the verifier does not track this spill -> r4 unknown */
0284 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
0285 /* if (r0 > r3) goto skip */
0286 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
0287 /* packet access with unproven range -> REJECT */
0288 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
0289 BPF_MOV64_IMM(BPF_REG_0, 0),
0290 BPF_EXIT_INSN(),
0291 },
0292 .result = REJECT,
0293 .errstr = "invalid access to packet",
0294 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0295 },
0296 {
0297 "Spill and refill a umax=40 bounded scalar. Offset to skb->data",
0298 .insns = {
0299 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
0300 offsetof(struct __sk_buff, data)),
0301 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
0302 offsetof(struct __sk_buff, data_end)),
0303 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
0304 offsetof(struct __sk_buff, tstamp)),
0305 BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
0306 BPF_MOV64_IMM(BPF_REG_0, 0),
0307 BPF_EXIT_INSN(),
0308 /* *(u32 *)(r10 -8) = r4, with R4 bounded to umax=40 */
0309 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0310 /* r4 = *(u32 *)(r10 -8); the umax=40 bound must survive */
0311 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
0312 /* r2 += r4; R2=pkt R4=umax=40 */
0313 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
0314 /* r0 = r2; R0=pkt,umax=40 */
0315 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0316 /* r2 += 20; R2=pkt,umax=60 */
0317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
0318 /* if (r2 > r3) goto end */
0319 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
0320 /* r0 = *(u32 *)r0; safe given the bound proven above */
0321 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
0322 BPF_MOV64_IMM(BPF_REG_0, 0),
0323 BPF_EXIT_INSN(),
0324 },
0325 .result = ACCEPT,
0326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0327 },
0328 {
0329 "Spill a u32 scalar at fp-4 and then at fp-8",
0330 .insns = {
0331 /* r4 = 4321 */
0332 BPF_MOV32_IMM(BPF_REG_4, 4321),
0333 /* *(u32 *)(r10 -4) = r4 */
0334 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
0335 /* *(u32 *)(r10 -8) = r4 */
0336 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
0337 /* r4 = *(u64 *)(r10 -8): both halves of the slot were written,
0338  * so the 8-byte read of fp-8..fp-1 is accepted */
0339 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
0340 BPF_MOV64_IMM(BPF_REG_0, 0),
0341 BPF_EXIT_INSN(),
0342 },
0343 .result = ACCEPT,
0344 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0345 },