{
	"check bpf_perf_event_data->sample_period byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
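	/* sample_period is a 64-bit field; on big endian the offset is
	 * bumped by 7 so the single-byte load still reads its
	 * least-significant byte.
	 */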
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period) + 7),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
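	/* Big endian: offset +6 targets the two low-order bytes of the
	 * 64-bit sample_period field.
	 */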
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period) + 6),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period word load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
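	/* Big endian: offset +4 targets the low-order 32 bits of the
	 * 64-bit sample_period field.
	 */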
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period) + 4),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period dword load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
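	/* The full 8-byte load covers the whole field, so no endianness
	 * adjustment is needed.
	 */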
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},