// SPDX-License-Identifier: GPL-2.0

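/* A minimal BPF TCP congestion-control algorithm (struct_ops) that requests
 * packet pacing and computes sk_pacing_rate itself from the congestion
 * window, MSS, and smoothed RTT.
 */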
#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

#define USEC_PER_SEC 1000000UL

#define min(a, b) ((a) < (b) ? (a) : (b))

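/* Local mirror of the kernel's tcp_sk(): struct tcp_sock embeds struct sock
 * as its first member, so the pointer cast is valid.
 */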
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}

SEC("struct_ops/write_sk_pacing_init")
void BPF_PROG(write_sk_pacing_init, struct sock *sk)
{
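	/* Ask the stack to pace this socket so that the sk_pacing_rate set
	 * in .cong_control below is honored, either by the fq qdisc or by
	 * TCP's internal pacing.  The CAS variant additionally exercises
	 * BPF atomics when built with -DENABLE_ATOMICS_TESTS.
	 */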
#ifdef ENABLE_ATOMICS_TESTS
	__sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
				     SK_PACING_NEEDED);
#else
	sk->sk_pacing_status = SK_PACING_NEEDED;
#endif
}

SEC("struct_ops/write_sk_pacing_cong_control")
void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
	      const struct rate_sample *rs)
{
	const struct tcp_sock *tp = tcp_sk(sk);
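	/* Pace at roughly cwnd * mss / srtt bytes per second.  tp->srtt_us
	 * holds the smoothed RTT in usec, left-shifted by 3, so the
	 * numerator is scaled by the same << 3; the ?: guards against a
	 * zero (not yet sampled) srtt by substituting 1 usec in the same
	 * fixed-point encoding.
	 */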
	unsigned long rate =
		((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
		(tp->srtt_us ?: 1U << 3);
	sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
}

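/* ssthresh and undo_cwnd are mandatory tcp_congestion_ops hooks; this test
 * CC implements both as no-ops that leave the current values unchanged.
 */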
SEC("struct_ops/write_sk_pacing_ssthresh")
__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
{
	return tcp_sk(sk)->snd_ssthresh;
}

SEC("struct_ops/write_sk_pacing_undo_cwnd")
__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
{
	return tcp_sk(sk)->snd_cwnd;
}

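/* Attaching this struct_ops map registers the algorithm under the name
 * below; sockets can then select it with
 * setsockopt(IPPROTO_TCP, TCP_CONGESTION, "bpf_w_sk_pacing").
 */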
SEC(".struct_ops")
struct tcp_congestion_ops write_sk_pacing = {
	.init = (void *)write_sk_pacing_init,
	.cong_control = (void *)write_sk_pacing_cong_control,
	.ssthresh = (void *)write_sk_pacing_ssthresh,
	.undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
	.name = "bpf_w_sk_pacing",
};
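
/* Usage sketch (hypothetical, not part of this file): assuming this program
 * is compiled with clang -target bpf and a skeleton header
 * write_sk_pacing.skel.h is generated via `bpftool gen skeleton`, a
 * userspace loader could attach and select the algorithm roughly like this:
 *
 *	struct write_sk_pacing *skel = write_sk_pacing__open_and_load();
 *	struct bpf_link *link =
 *		bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		   "bpf_w_sk_pacing", sizeof("bpf_w_sk_pacing") - 1);
 */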