#include "xdp_sample.bpf.h"

#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

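/* Per-tracepoint counters. The array_map type comes from xdp_sample.bpf.h;
 * each map holds one struct datarec slot per CPU, and some maps are further
 * bucketed by a key (error class, XDP action, or target CPU) using the
 * key * nr_cpus + cpu indexing seen in the lookups below.
 */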
array_map rx_cnt SEC(".maps");
array_map redir_err_cnt SEC(".maps");
array_map cpumap_enqueue_cnt SEC(".maps");
array_map cpumap_kthread_cnt SEC(".maps");
array_map exception_cnt SEC(".maps");
array_map devmap_xmit_cnt SEC(".maps");

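/* Counters for xdp_devmap_xmit broken out per (source, destination) device
 * pair; the 64-bit key packs both ifindexes (see tp_xdp_devmap_xmit_multi).
 */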
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __uint(max_entries, 32 * 32);
        __type(key, u64);
        __type(value, struct datarec);
} devmap_xmit_cnt_multi SEC(".maps");

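/* Set by userspace before loading, so per-CPU slots in the array maps can be
 * strided as key * nr_cpus + cpu.
 */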
const volatile int nr_cpus = 0;

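/* These are also set before loading. Because they are const volatile, the
 * verifier can constant-fold the comparisons and dead-code-eliminate branches
 * for entries left at zero, so only real matches cost anything at runtime.
 * This lets the sample filter tracepoint stats by net_device ifindex.
 */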
const volatile int from_match[32] = {};
const volatile int to_match[32] = {};

int cpumap_map_id = 0;

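/* Test whether b is a member of set a; an empty set (first entry zero)
 * matches everything. E.g. IN_SET(from_match, 3) is true when from_match is
 * all zeroes or contains ifindex 3.
 */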
#define IN_SET(a, b)                                                 \
        ({                                                           \
                bool __res = !(a)[0];                                \
                for (int i = 0; i < ARRAY_SIZE(a) && (a)[i]; i++) {  \
                        __res = (a)[i] == (b);                       \
                        if (__res)                                   \
                                break;                               \
                }                                                    \
                __res;                                               \
        })

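/* Map a redirect errno onto a stats bucket: 0 is success, 2-6 are specific
 * errors, and 1 catches anything else.
 */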
static __always_inline __u32 xdp_get_err_key(int err)
{
        switch (err) {
        case 0:
                return 0;
        case -EINVAL:
                return 2;
        case -ENETDOWN:
                return 3;
        case -EMSGSIZE:
                return 4;
        case -EOPNOTSUPP:
                return 5;
        case -ENOSPC:
                return 6;
        default:
                return 1;
        }
}

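/* Shared collector for the four redirect tracepoints below: bucket the event
 * per (error class, cpu), counting successes as processed and errors as
 * dropped.
 */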
static __always_inline int xdp_redirect_collect_stat(int from, int err)
{
        u32 cpu = bpf_get_smp_processor_id();
        u32 key = XDP_REDIRECT_ERROR;
        struct datarec *rec;
        u32 idx;

        if (!IN_SET(from_match, from))
                return 0;

        key = xdp_get_err_key(err);

        idx = key * nr_cpus + cpu;
        rec = bpf_map_lookup_elem(&redir_err_cnt, &idx);
        if (!rec)
                return 0;
        if (key)
                NO_TEAR_INC(rec->dropped);
        else
                NO_TEAR_INC(rec->processed);
        return 0;
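        /* Always return 0: the return value of a tp_btf program is ignored
         * by the attach point, so there is nothing to signal here.
         */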
}

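/* The redirect tracepoints differ in which arguments are populated, but the
 * fields used here (dev->ifindex and err) are common to all four, so they
 * share the collector above.
 */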
SEC("tp_btf/xdp_redirect_err")
int BPF_PROG(tp_xdp_redirect_err, const struct net_device *dev,
             const struct bpf_prog *xdp, const void *tgt, int err,
             const struct bpf_map *map, u32 index)
{
        return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_redirect_map_err")
int BPF_PROG(tp_xdp_redirect_map_err, const struct net_device *dev,
             const struct bpf_prog *xdp, const void *tgt, int err,
             const struct bpf_map *map, u32 index)
{
        return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_redirect")
int BPF_PROG(tp_xdp_redirect, const struct net_device *dev,
             const struct bpf_prog *xdp, const void *tgt, int err,
             const struct bpf_map *map, u32 index)
{
        return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_redirect_map")
int BPF_PROG(tp_xdp_redirect_map, const struct net_device *dev,
             const struct bpf_prog *xdp, const void *tgt, int err,
             const struct bpf_map *map, u32 index)
{
        return xdp_redirect_collect_stat(dev->ifindex, err);
}

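/* Count packets enqueued to a cpumap entry, bucketed per (to_cpu, cpu) pair;
 * when cpumap_map_id is set, only events from that map are counted.
 */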
SEC("tp_btf/xdp_cpumap_enqueue")
int BPF_PROG(tp_xdp_cpumap_enqueue, int map_id, unsigned int processed,
             unsigned int drops, int to_cpu)
{
        u32 cpu = bpf_get_smp_processor_id();
        struct datarec *rec;
        u32 idx;

        if (cpumap_map_id && cpumap_map_id != map_id)
                return 0;

        idx = to_cpu * nr_cpus + cpu;
        rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &idx);
        if (!rec)
                return 0;
        NO_TEAR_ADD(rec->processed, processed);
        NO_TEAR_ADD(rec->dropped, drops);
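        /* Record bulk events, so userspace can calculate the average bulk
         * size.
         */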
        if (processed > 0)
                NO_TEAR_INC(rec->issue);
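        /* This tracepoint can also reveal overload situations, and could be
         * used to build a feedback loop that lets XDP mitigate them.
         */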
        return 0;
}

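/* Per-CPU stats from the cpumap kthread, including the XDP verdicts it saw;
 * each scheduling event (sched) is recorded in the issue counter.
 */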
SEC("tp_btf/xdp_cpumap_kthread")
int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
             unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
{
        struct datarec *rec;
        u32 cpu;

        if (cpumap_map_id && cpumap_map_id != map_id)
                return 0;

        cpu = bpf_get_smp_processor_id();
        rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu);
        if (!rec)
                return 0;
        NO_TEAR_ADD(rec->processed, processed);
        NO_TEAR_ADD(rec->dropped, drops);
        NO_TEAR_ADD(rec->xdp_pass, xdp_stats->pass);
        NO_TEAR_ADD(rec->xdp_drop, xdp_stats->drop);
        NO_TEAR_ADD(rec->xdp_redirect, xdp_stats->redirect);

        if (sched)
                NO_TEAR_INC(rec->issue);
        return 0;
}

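/* Count xdp_exception events per (XDP action, cpu); action codes above
 * XDP_REDIRECT are folded into one overflow bucket.
 */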
SEC("tp_btf/xdp_exception")
int BPF_PROG(tp_xdp_exception, const struct net_device *dev,
             const struct bpf_prog *xdp, u32 act)
{
        u32 cpu = bpf_get_smp_processor_id();
        struct datarec *rec;
        u32 key = act, idx;

        if (!IN_SET(from_match, dev->ifindex))
                return 0;
        if (!IN_SET(to_match, dev->ifindex))
                return 0;

        if (key > XDP_REDIRECT)
                key = XDP_REDIRECT + 1;

        idx = key * nr_cpus + cpu;
        rec = bpf_map_lookup_elem(&exception_cnt, &idx);
        if (!rec)
                return 0;
        NO_TEAR_INC(rec->dropped);

        return 0;
}

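/* Per-CPU totals for devmap transmissions, filtered on both the source and
 * the destination device.
 */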
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit, const struct net_device *from_dev,
             const struct net_device *to_dev, int sent, int drops, int err)
{
        struct datarec *rec;
        int idx_in, idx_out;
        u32 cpu;

        idx_in = from_dev->ifindex;
        idx_out = to_dev->ifindex;

        if (!IN_SET(from_match, idx_in))
                return 0;
        if (!IN_SET(to_match, idx_out))
                return 0;

        cpu = bpf_get_smp_processor_id();
        rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &cpu);
        if (!rec)
                return 0;
        NO_TEAR_ADD(rec->processed, sent);
        NO_TEAR_ADD(rec->dropped, drops);
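        /* Record bulk events, so userspace can calculate the average bulk
         * size.
         */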
        NO_TEAR_INC(rec->info);
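        /* Record error cases where no frames were sent, and catch a driver's
         * ndo_xdp_xmit reporting a negative count, which is an API error.
         */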
        if (err || drops < 0)
                NO_TEAR_INC(rec->issue);
        return 0;
}

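/* Same devmap_xmit accounting, but keyed per (source, destination) device
 * pair: both ifindexes are packed into one u64 hash key, and the entry is
 * created on first use with BPF_NOEXIST so a concurrent CPU cannot clobber
 * an existing record.
 */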
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device *from_dev,
             const struct net_device *to_dev, int sent, int drops, int err)
{
        struct datarec empty = {};
        struct datarec *rec;
        int idx_in, idx_out;
        u64 idx;

        idx_in = from_dev->ifindex;
        idx_out = to_dev->ifindex;
        idx = idx_in;
        idx = idx << 32 | idx_out;

        if (!IN_SET(from_match, idx_in))
                return 0;
        if (!IN_SET(to_match, idx_out))
                return 0;

        bpf_map_update_elem(&devmap_xmit_cnt_multi, &idx, &empty, BPF_NOEXIST);
        rec = bpf_map_lookup_elem(&devmap_xmit_cnt_multi, &idx);
        if (!rec)
                return 0;

        NO_TEAR_ADD(rec->processed, sent);
        NO_TEAR_ADD(rec->dropped, drops);
        NO_TEAR_INC(rec->info);
        if (err || drops < 0)
                NO_TEAR_INC(rec->issue);
        return 0;
}

char _license[] SEC("license") = "GPL";
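
/* Userspace loads this object through the usual libbpf skeleton flow and is
 * expected to fill in the rodata knobs before load. A minimal sketch,
 * assuming a skeleton named xdp_sample_bpf (the name is illustrative, not
 * taken from this file):
 *
 *      struct xdp_sample_bpf *skel = xdp_sample_bpf__open();
 *      skel->rodata->nr_cpus = libbpf_num_possible_cpus();
 *      skel->rodata->from_match[0] = ifindex;
 *      xdp_sample_bpf__load(skel);
 *      xdp_sample_bpf__attach(skel);
 *      ... then poll the maps, e.g. via bpf_map__fd(skel->maps.rx_cnt) ...
 */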