{
	"access skb fields ok",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, queue_mapping)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, protocol)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_present)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_tci)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"access skb fields bad1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
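/*
 * The next three tests mix the ctx pointer in R1 with a map value pointer
 * returned by bpf_map_lookup_elem(); the verifier is expected to reject the
 * later ctx-style load through the ambiguous register ("different pointers").
 */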
{
	"access skb fields bad2",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad3",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -12),
	},
	.fixup_map_hash_8b = { 6 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad4",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -13),
	},
	.fixup_map_hash_8b = { 7 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
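/*
 * Socket-level fields (family, addresses, ports) are not exposed to the
 * default (socket filter) program type, so the loads below are expected
 * to be rejected.
 */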
{
	"invalid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
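/* The same socket-level fields are expected to be readable from BPF_PROG_TYPE_SK_SKB. */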
{
	"valid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
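/*
 * SK_SKB read/write restrictions: tc_classid and mark are expected to be
 * off limits, while tc_index and priority should accept word-sized stores.
 */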
{
	"invalid access of tc_classid for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_classid)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"invalid access of skb->mark for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->mark is not writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->tc_index is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"check skb->priority is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, priority)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
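/*
 * Direct packet access from SK_SKB: reads and writes through data/data_end
 * should be allowed once the accessed range has been checked against data_end.
 */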
{
	"direct packet read for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"direct packet write for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"overlapping checks for direct packet access SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
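/* Socket filters are expected to have no write access to mark or tc_index. */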
{
	"check skb->mark is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
{
	"check skb->tc_index is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
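/*
 * cb[] should be accessible at byte granularity: every byte of cb[0]..cb[4]
 * is stored and then loaded back below.
 */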
{
	"check cb access: byte",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
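/*
 * skb->hash is read-only: byte stores should be rejected while byte loads
 * are allowed. The #if blocks pick the equivalent byte offset for little-
 * and big-endian hosts.
 */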
{
	"__sk_buff->hash, offset 0, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"__sk_buff->tc_index, offset 3, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check skb->hash byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
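/*
 * cb[] at half-word granularity: BPF_PROG_TYPE_CGROUP_SOCK is expected to
 * have no cb access at all, while aligned half stores/loads pass for the
 * default program type and misaligned ones fail under strict alignment.
 */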
{
	"check cb access: byte, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check cb access: half",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: half, unaligned",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check __sk_buff->hash, offset 0, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->tc_index, offset 2, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
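/*
 * Half-word loads of skb->hash: aligned offsets should be accepted, and
 * unaligned offsets rejected even on arches with efficient unaligned access.
 */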
{
	"check skb->hash half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load not permitted, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check skb->hash half load not permitted, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check cb access: half, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
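/*
 * cb[] at word and double-word granularity, plus the corresponding
 * strict-alignment rejections for misaligned offsets.
 */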
{
	"check cb access: word",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: word, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: double, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
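/*
 * Out-of-bounds and wrongly sized ctx accesses: a double-word at cb[4]
 * spills past the cb array, and ifindex does not allow double-word access.
 */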
{
	"check cb access: double, oob 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, oob 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check out of range skb->cb access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 256),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
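/*
 * Writability differs by program type: socket filters may only write cb[],
 * while tc_cls_act programs may also write mark, tc_index and tstamp.
 */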
{
	"write skb fields from socket prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
},
{
	"write skb fields from tc_cls_act prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
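/*
 * skb->data carries a pointer; narrower accesses such as the half-word
 * load below are expected to be rejected.
 */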
{
	"check skb->data half load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
},
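/*
 * gso_segs and gso_size should be readable from CGROUP_SKB and SCHED_CLS
 * but never writable, and the padding after gso_size stays inaccessible.
 */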
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_segs from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=164 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_size from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=176 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"padding after gso_size is not accessible",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetofend(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=180 size=4",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
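/*
 * hwtstamp is a 64-bit field: reads should pass for CGROUP_SKB and
 * SCHED_CLS, while stores from CGROUP_SKB are expected to be rejected.
 */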
{
	"read hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=184 size=8",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read hwtstamp from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
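/* wire_len: not visible to socket filters, readable but not writable by tc classifiers. */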
{
	"check wire_len is not readable by sockets",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check wire_len is readable by tc classifier",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"check wire_len is not writable by tc classifier",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
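/*
 * Branch-taken tracking for packet pointers: after a 'pkt > pkt_end' or
 * 'pkt_end < pkt' check, the fall-through path should be allowed to read
 * within the verified range.
 */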
{
	"pkt > pkt_end taken check",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"pkt_end < pkt taken check",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
	BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},