#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "trace_common.h"

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

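/* Preallocated hash map exercised by stress_hmap(): update, lookup, delete per call. */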
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");

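/* LRU hash map using the default common LRU list shared across all CPUs. */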
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");

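/* LRU hash map with BPF_F_NO_COMMON_LRU: each CPU maintains its own LRU list. */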
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");

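/* Inner LRU hash map allocated on NUMA node 0; it also fills slot 0 of array_of_lru_hashs below. */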
struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");

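/*
 * Map-in-map: an array of LRU hash maps indexed by CPU id. Only slot 0 is
 * populated statically; the remaining slots are expected to be filled at
 * run time (a missing slot makes test_case 2 below return -ENOENT).
 */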
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru);
} array_of_lru_hashs SEC(".maps") = {
	.values = { &inner_lru_hash_map },
};

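/* Per-CPU hash map: every CPU holds its own copy of each value. */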
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");

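/*
 * BPF_F_NO_PREALLOC variants: elements are allocated on update instead of
 * being preallocated when the map is created.
 */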
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");

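/* LPM trie: the 8-byte key is a 4-byte prefix length followed by a 4-byte IPv4 address. */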
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");

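/* Plain array map and an LRU hash map used only by the lookup stress tests. */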
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");

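/*
 * The four programs below attach to kprobes on inexpensive syscalls. Each
 * one updates, looks up, and deletes a single element in its map, keyed by
 * the low 32 bits of bpf_get_current_pid_tgid().
 */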
SEC("kprobe/" SYSCALL(sys_getuid))
int stress_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value)
		bpf_map_delete_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_geteuid))
int stress_percpu_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_getgid))
int stress_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&hash_map_alloc, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_getegid))
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
	return 0;
}

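/*
 * Attached to the connect() kprobe. The test parameters are read from the
 * call's IPv6 destination address: two magic words (0xdead, 0xbeef), a test
 * case selector, and a starting key used by the lookup test (case 3).
 */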
SEC("kprobe/" SYSCALL(sys_connect))
int stress_lru_hmap_alloc(struct pt_regs *ctx)
{
	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	union {
		u16 dst6[8];
		struct {
			u16 magic0;
			u16 magic1;
			u16 tcase;
			u16 unused16;
			u32 unused32;
			u32 key;
		};
	} test_params;
	struct sockaddr_in6 *in6;
	u16 test_case;
	int addrlen, ret;
	long val = 1;
	u32 key = 0;

	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
	addrlen = (int)PT_REGS_PARM3_CORE(real_regs);

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
				  &in6->sin6_addr);
	if (ret)
		goto done;

	if (test_params.magic0 != 0xdead ||
	    test_params.magic1 != 0xbeef)
		return 0;

	test_case = test_params.tcase;
	if (test_case != 3)
		key = bpf_get_prandom_u32();

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 3) {
		u32 i;

		key = test_params.key;

#pragma clang loop unroll(full)
		for (i = 0; i < 32; i++) {
			bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
			key++;
		}
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}

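/* Looks up 192.168.0.1 with a /32 prefix in the LPM trie 32 times per invocation. */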
SEC("kprobe/" SYSCALL(sys_gettid))
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;
	key.b8[4] = 192;
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}

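/* 64 back-to-back lookups in the preallocated hash map. */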
SEC("kprobe/" SYSCALL(sys_getpgid))
int stress_hash_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&hash_map, &key);

	return 0;
}

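/* 64 back-to-back lookups in the array map. */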
SEC("kprobe/" SYSCALL(sys_getppid))
int stress_array_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&array_map, &key);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;