#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";
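
/* perf event array that bpf_skb_output() below writes packet bytes
 * and metadata into; user space presumably polls it per CPU
 */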
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} perf_buf_map SEC(".maps");

/* wrap a single field access in a CO-RE relocation */
#define _(P) (__builtin_preserve_access_index(P))

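/* Minimal local definitions of the kernel structs this program reads.
 * Only the fields that are actually accessed need to be present;
 * CO-RE relocations match fields by name against the running
 * kernel's BTF, so neither offsets nor field order have to agree.
 */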
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};
struct dev_ifalias {
	struct callback_head rcuhead;
};

struct net_device {
	int ifindex;
	struct dev_ifalias *ifalias;
};

typedef struct {
	int counter;
} atomic_t;
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

struct sk_buff {
	/* field names and sizes should match those in the kernel */
	unsigned int len, data_len;
	__u16 mac_len, hdr_len, queue_mapping;
	struct net_device *dev;
	/* field order doesn't matter, CO-RE matches by name */
	refcount_t users;
	unsigned char *data;
	char __pkt_type_offset[0];
	char cb[48];
};
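
/* metadata record that accompanies each perf event sample */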
struct meta {
	int ifindex;
	__u32 cb32_0;
	__u8 cb8_0;
};

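/* attached to the kfree_skb tracepoint:
 * TP_PROTO(struct sk_buff *skb, void *location)
 * tp_btf programs receive the arguments as BTF pointers and can
 * dereference them directly.
 */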
SEC("tp_btf/kfree_skb")
int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
{
	struct net_device *dev;
	struct callback_head *ptr;
	void *func;
	int users;
	unsigned char *data;
	unsigned short pkt_data;
	struct meta meta = {};
	char pkt_type;
	__u32 *cb32;
	__u8 *cb8;

	/* every access inside this block is CO-RE-relocated, so the
	 * dereference chains follow the running kernel's layout
	 */
	__builtin_preserve_access_index(({
		users = skb->users.refs.counter;
		data = skb->data;
		dev = skb->dev;
		ptr = dev->ifalias->rcuhead.next;
		func = ptr->func;
		cb8 = (__u8 *)&skb->cb;
		cb32 = (__u32 *)&skb->cb;
	}));

	meta.ifindex = _(dev->ifindex);
	/* cb[] is the skb's 48-byte control-buffer scratch area;
	 * sample one byte and one 32-bit word out of it
	 */
	meta.cb8_0 = cb8[8];
	meta.cb32_0 = cb32[2];

	/* pkt_type is a 3-bit bitfield that sits at __pkt_type_offset;
	 * read the whole byte and mask the other bits off
	 */
	bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type),
			      _(&skb->__pkt_type_offset));
	pkt_type &= 7;

	/* read the EtherType field at offset 12 of the Ethernet header */
	bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12);

	bpf_printk("rcuhead.next %llx func %llx\n", ptr, func);
	bpf_printk("skb->len %d users %d pkt_type %x\n",
		   _(skb->len), users, pkt_type);
	bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
	bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
		   meta.ifindex, data, pkt_data);
	bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);

	/* only report IPv6 (EtherType 0x86dd) skbs with a single user
	 * seen on ifindex 1
	 */
	if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
		/* raw tracepoints ignore the return value */
		return 0;

	/* send the first 72 bytes of the packet along with the metadata
	 * to user space; the upper 32 bits of the flags encode how much
	 * skb data to copy
	 */
	bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
		       &meta, sizeof(meta));
	return 0;
}

/* result flags that the fentry/fexit programs below set and the
 * test harness is expected to read back from the global data map
 */
struct {
	bool fentry_test_ok;
	bool fexit_test_ok;
} result = {};

SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	int len, ifindex;

	__builtin_preserve_access_index(({
		len = skb->len;
		ifindex = dev->ifindex;
	}));

	/* fentry sees the full frame, including the L2 header */
	if (len != 74 || ifindex != 1)
		return 0;
	result.fentry_test_ok = true;
	return 0;
}

SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	int len, ifindex;

	__builtin_preserve_access_index(({
		len = skb->len;
		ifindex = dev->ifindex;
	}));

	/* fexit runs after eth_type_trans() has pulled the 14-byte
	 * Ethernet header, so 60 bytes remain and protocol carries the
	 * function's return value, the extracted EtherType
	 */
	if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1)
		return 0;
	result.fexit_test_ok = true;
	return 0;
}