0001
0002
0003
0004
0005 #include <string.h>
0006
0007 #include <linux/bpf.h>
0008 #include <linux/pkt_cls.h>
0009 #include <linux/if_ether.h>
0010 #include <linux/in.h>
0011 #include <linux/ip.h>
0012 #include <linux/ipv6.h>
0013 #include <sys/socket.h>
0014 #include <linux/tcp.h>
0015
0016 #include <bpf/bpf_helpers.h>
0017 #include <bpf/bpf_endian.h>
0018
/* Array map used to report results to the userspace test harness:
 *   key 0: cookie recovered via bpf_tcp_check_syncookie() from an ACK
 *   key 1: cookie produced by bpf_tcp_gen_syncookie() for a SYN
 *   key 2: MSS value carried in the upper 32 bits of the generated cookie
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, __u32);
	__uint(max_entries, 3);
} results SEC(".maps");
0025
0026 static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,
0027 void *iph, __u32 ip_size,
0028 struct tcphdr *tcph)
0029 {
0030 __u32 thlen = tcph->doff * 4;
0031
0032 if (tcph->syn && !tcph->ack) {
0033
0034 if (thlen != 24)
0035 return 0;
0036
0037 if ((void *)tcph + thlen > data_end)
0038 return 0;
0039
0040 return bpf_tcp_gen_syncookie(sk, iph, ip_size, tcph, thlen);
0041 }
0042 return 0;
0043 }
0044
/* Parse an Ethernet frame, look up the listening TCP socket matching the
 * packet's 4-tuple, and exercise both syncookie helpers:
 *   - bpf_tcp_gen_syncookie() for SYN packets; on success the cookie and
 *     MSS are stored in the results map at keys 1 and 2,
 *   - bpf_tcp_check_syncookie() for cookie-ACK packets; on success the
 *     recovered cookie (ack_seq - 1) is stored at key 0.
 *
 * @ctx:      the attaching program's context (skb or xdp_md), passed
 *            through to bpf_skc_lookup_tcp()
 * @data:     start of packet data
 * @data_end: end of packet data (for verifier bounds checks)
 *
 * Each pointer dereference below is preceded by an explicit bounds check
 * against data_end, as required by the BPF verifier.
 */
static __always_inline void check_syncookie(void *ctx, void *data,
					    void *data_end)
{
	struct bpf_sock_tuple tup;
	struct bpf_sock *sk;
	struct ethhdr *ethh;
	struct iphdr *ipv4h;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	int ret;
	__u32 key_mss = 2;
	__u32 key_gen = 1;
	__u32 key = 0;
	__s64 seq_mss;

	ethh = data;
	if (ethh + 1 > data_end)
		return;

	switch (bpf_ntohs(ethh->h_proto)) {
	case ETH_P_IP:
		ipv4h = data + sizeof(struct ethhdr);
		if (ipv4h + 1 > data_end)
			return;

		/* Only handle IPv4 headers without options, so the TCP
		 * header sits at a fixed offset.
		 */
		if (ipv4h->ihl != 5)
			return;

		tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
		if (tcph + 1 > data_end)
			return;

		tup.ipv4.saddr = ipv4h->saddr;
		tup.ipv4.daddr = ipv4h->daddr;
		tup.ipv4.sport = tcph->source;
		tup.ipv4.dport = tcph->dest;

		sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
					BPF_F_CURRENT_NETNS, 0);
		if (!sk)
			return;

		/* Syncookies only apply to listening sockets; anything
		 * else just releases the reference.
		 */
		if (sk->state != BPF_TCP_LISTEN)
			goto release;

		seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),
					tcph);

		ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
					      tcph, sizeof(*tcph));
		break;

	case ETH_P_IPV6:
		ipv6h = data + sizeof(struct ethhdr);
		if (ipv6h + 1 > data_end)
			return;

		/* No extension-header walking: TCP must follow directly. */
		if (ipv6h->nexthdr != IPPROTO_TCP)
			return;

		tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
		if (tcph + 1 > data_end)
			return;

		memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
		memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
		tup.ipv6.sport = tcph->source;
		tup.ipv6.dport = tcph->dest;

		sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
					BPF_F_CURRENT_NETNS, 0);
		if (!sk)
			return;

		if (sk->state != BPF_TCP_LISTEN)
			goto release;

		seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),
					tcph);

		ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
					      tcph, sizeof(*tcph));
		break;

	default:
		return;
	}

	/* Positive seq_mss: helper succeeded; low 32 bits are the cookie,
	 * high 32 bits the MSS.
	 */
	if (seq_mss > 0) {
		__u32 cookie = (__u32)seq_mss;
		__u32 mss = seq_mss >> 32;

		bpf_map_update_elem(&results, &key_gen, &cookie, 0);
		bpf_map_update_elem(&results, &key_mss, &mss, 0);
	}

	/* ret == 0: valid cookie-ACK; the cookie is ack_seq - 1 per the
	 * TCP syncookie scheme.
	 */
	if (ret == 0) {
		__u32 cookie = bpf_ntohl(tcph->ack_seq) - 1;

		bpf_map_update_elem(&results, &key, &cookie, 0);
	}

release:
	bpf_sk_release(sk);
}
0150
0151 SEC("tc")
0152 int check_syncookie_clsact(struct __sk_buff *skb)
0153 {
0154 check_syncookie(skb, (void *)(long)skb->data,
0155 (void *)(long)skb->data_end);
0156 return TC_ACT_OK;
0157 }
0158
0159 SEC("xdp")
0160 int check_syncookie_xdp(struct xdp_md *ctx)
0161 {
0162 check_syncookie(ctx, (void *)(long)ctx->data,
0163 (void *)(long)ctx->data_end);
0164 return XDP_PASS;
0165 }
0166
/* License declaration read by the BPF loader; GPL-compatible licensing is
 * required to use GPL-only kernel helpers.
 */
char _license[] SEC("license") = "GPL";