// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

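/* Offsets of the tcp_congestion_ops members that a bpf-tcp-cc is not
 * allowed to implement (enforced in bpf_tcp_ca_check_member() below).
 */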
static u32 unsupported_ops[] = {
    offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

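/* Resolve and cache the vmlinux BTF ids of "struct sock" and
 * "struct tcp_sock"; they are used below to recognize and promote
 * socket pointers.
 */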
static int bpf_tcp_ca_init(struct btf *btf)
{
    s32 type_id;

    type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
    if (type_id < 0)
        return -EINVAL;
    sock_id = type_id;

    type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
    if (type_id < 0)
        return -EINVAL;
    tcp_sock_id = type_id;
    tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

    return 0;
}

static bool is_unsupported(u32 member_offset)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
        if (member_offset == unsupported_ops[i])
            return true;
    }

    return false;
}

extern struct btf *btf_vmlinux;

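/* Validate a ctx access against the BTF func proto of the
 * tcp_congestion_ops member being implemented.  A "struct sock *"
 * argument is promoted to "struct tcp_sock *" so the prog can read
 * tcp_sock fields directly.
 */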
static bool bpf_tcp_ca_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
    if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
        return false;

    if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
        /* promote it to tcp_sock */
        info->btf_id = tcp_sock_id;

    return true;
}

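/* Reads through a PTR_TO_BTF_ID are allowed anywhere.  Writes are only
 * allowed to a fixed set of tcp_sock fields (including fields of the
 * embedded inet_connection_sock/sock), and only when the access stays
 * within the bounds of the matched field.
 */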
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
                                        const struct btf *btf,
                                        const struct btf_type *t, int off,
                                        int size, enum bpf_access_type atype,
                                        u32 *next_btf_id,
                                        enum bpf_type_flag *flag)
{
    size_t end;

    if (atype == BPF_READ)
        return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
                                 flag);

    if (t != tcp_sock_type) {
        bpf_log(log, "only read is supported\n");
        return -EACCES;
    }

    switch (off) {
    case offsetof(struct sock, sk_pacing_rate):
        end = offsetofend(struct sock, sk_pacing_rate);
        break;
    case offsetof(struct sock, sk_pacing_status):
        end = offsetofend(struct sock, sk_pacing_status);
        break;
    case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
        end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
        break;
    case offsetof(struct inet_connection_sock, icsk_ack.pending):
        end = offsetofend(struct inet_connection_sock,
                          icsk_ack.pending);
        break;
    case offsetof(struct tcp_sock, snd_cwnd):
        end = offsetofend(struct tcp_sock, snd_cwnd);
        break;
    case offsetof(struct tcp_sock, snd_cwnd_cnt):
        end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
        break;
    case offsetof(struct tcp_sock, snd_ssthresh):
        end = offsetofend(struct tcp_sock, snd_ssthresh);
        break;
    case offsetof(struct tcp_sock, ecn_flags):
        end = offsetofend(struct tcp_sock, ecn_flags);
        break;
    default:
        bpf_log(log, "no write support to tcp_sock at off %d\n", off);
        return -EACCES;
    }

    if (off + size > end) {
        bpf_log(log,
            "write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
            off, size, end);
        return -EACCES;
    }

    return NOT_INIT;
}

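/* Helper backing bpf_tcp_send_ack(): send an ACK on tp carrying the
 * given rcv_nxt.  Thin wrapper around __tcp_send_ack().
 */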
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
    /* bpf_tcp_ca prog cannot have NULL tp */
    __tcp_send_ack((struct sock *)tp, rcv_nxt);
    return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
    .func       = bpf_tcp_send_ack,
    .gpl_only   = false,
    /* In case we want to report error later */
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_PTR_TO_BTF_ID,
    .arg1_btf_id    = &tcp_sock_id,
    .arg2_type  = ARG_ANYTHING,
};

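/* For a struct_ops prog, expected_attach_type holds the index of the
 * tcp_congestion_ops member being implemented.  Convert that index to
 * the member's byte offset so it can be compared with offsetof().
 */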
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
    const struct btf_member *m;
    const struct btf_type *t;
    u32 midx;

    midx = prog->expected_attach_type;
    t = bpf_tcp_congestion_ops.type;
    m = &btf_type_member(t)[midx];

    return __btf_member_bit_offset(t, m) / 8;
}

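/* Helpers callable by a bpf-tcp-cc, on top of the base set. */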
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
                          const struct bpf_prog *prog)
{
    switch (func_id) {
    case BPF_FUNC_tcp_send_ack:
        return &bpf_tcp_send_ack_proto;
    case BPF_FUNC_sk_storage_get:
        return &bpf_sk_storage_get_proto;
    case BPF_FUNC_sk_storage_delete:
        return &bpf_sk_storage_delete_proto;
    case BPF_FUNC_setsockopt:
        /* Does not allow release() to call setsockopt.
         * release() is called when the current bpf-tcp-cc
         * is retiring.  It is not allowed to call
         * setsockopt() to make further changes which
         * may potentially allocate new resources.
         */
        if (prog_ops_moff(prog) !=
            offsetof(struct tcp_congestion_ops, release))
            return &bpf_sk_setsockopt_proto;
        return NULL;
    case BPF_FUNC_getsockopt:
        /* Since get/setsockopt is usually expected to
         * be available together, disable getsockopt for
         * release also to avoid usage surprise.
         * The bpf-tcp-cc already has a more powerful way
         * to read tcp_sock from the PTR_TO_BTF_ID.
         */
        if (prog_ops_moff(prog) !=
            offsetof(struct tcp_congestion_ops, release))
            return &bpf_sk_getsockopt_proto;
        return NULL;
    case BPF_FUNC_ktime_get_coarse_ns:
        return &bpf_ktime_get_coarse_ns_proto;
    default:
        return bpf_base_func_proto(func_id);
    }
}

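/* Plain kernel functions (kfuncs) that a bpf-tcp-cc may call directly,
 * e.g. to reuse the kernel's reno and slow-start building blocks.
 */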
BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
    .owner = THIS_MODULE,
    .set   = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
    .get_func_proto     = bpf_tcp_ca_get_func_proto,
    .is_valid_access    = bpf_tcp_ca_is_valid_access,
    .btf_struct_access  = bpf_tcp_ca_btf_struct_access,
};

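/* Copy and validate the non-func members of tcp_congestion_ops from
 * the user-provided image.  Return 1 when a member is fully handled
 * here, 0 to fall back to the generic struct_ops handling, and a
 * negative errno on error.
 */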
static int bpf_tcp_ca_init_member(const struct btf_type *t,
                                  const struct btf_member *member,
                                  void *kdata, const void *udata)
{
    const struct tcp_congestion_ops *utcp_ca;
    struct tcp_congestion_ops *tcp_ca;
    u32 moff;

    utcp_ca = (const struct tcp_congestion_ops *)udata;
    tcp_ca = (struct tcp_congestion_ops *)kdata;

    moff = __btf_member_bit_offset(t, member) / 8;
    switch (moff) {
    case offsetof(struct tcp_congestion_ops, flags):
        if (utcp_ca->flags & ~TCP_CONG_MASK)
            return -EINVAL;
        tcp_ca->flags = utcp_ca->flags;
        return 1;
    case offsetof(struct tcp_congestion_ops, name):
        if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
                             sizeof(tcp_ca->name)) <= 0)
            return -EINVAL;
        if (tcp_ca_find(utcp_ca->name))
            return -EEXIST;
        return 1;
    }

    return 0;
}

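/* Called for each member the user sets; reject the unsupported ones. */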
static int bpf_tcp_ca_check_member(const struct btf_type *t,
                                   const struct btf_member *member)
{
    if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
        return -ENOTSUPP;
    return 0;
}

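/* The finished kdata image is a complete tcp_congestion_ops, so it is
 * (un)registered through the regular TCP congestion control framework.
 */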
static int bpf_tcp_ca_reg(void *kdata)
{
    return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
    tcp_unregister_congestion_control(kdata);
}

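/* The struct_ops descriptor for "struct tcp_congestion_ops"; matched
 * by .name and referenced from bpf_struct_ops.c (see the extern near
 * the top).
 */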
struct bpf_struct_ops bpf_tcp_congestion_ops = {
    .verifier_ops = &bpf_tcp_ca_verifier_ops,
    .reg = bpf_tcp_ca_reg,
    .unreg = bpf_tcp_ca_unreg,
    .check_member = bpf_tcp_ca_check_member,
    .init_member = bpf_tcp_ca_init_member,
    .init = bpf_tcp_ca_init,
    .name = "tcp_congestion_ops",
};

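/* Make the kfuncs above callable from BPF_PROG_TYPE_STRUCT_OPS progs. */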
static int __init bpf_tcp_ca_kfunc_init(void)
{
    return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
}
late_initcall(bpf_tcp_ca_kfunc_init);