#include <linux/filter.h>
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/nospec.h>
#include <linux/cookie.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;

DEFINE_COOKIE(sock_cookie);

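/*
 * Lazily assign a 64-bit cookie to the socket: generate a candidate and
 * publish it with cmpxchg(); if another CPU won the race, loop and return
 * whatever value ended up in sk->sk_cookie.
 */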
u64 __sock_gen_cookie(struct sock *sk)
{
	while (1) {
		u64 res = atomic64_read(&sk->sk_cookie);

		if (res)
			return res;
		res = gen_cookie_next(&sock_cookie);
		atomic64_cmpxchg(&sk->sk_cookie, 0, res);
	}
}

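/*
 * Compare a user-supplied cookie (two __u32 halves) against the socket's
 * cookie.  INET_DIAG_NOCOOKIE in both halves skips the check; a mismatch
 * means the caller is referring to a socket that is gone, hence -ESTALE.
 */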
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
{
	u64 res;

	if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
		return 0;

	res = sock_gen_cookie(sk);
	if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
		return -ESTALE;

	return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);

void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
{
	u64 res = sock_gen_cookie(sk);

	cookie[0] = (u32)res;
	cookie[1] = (u32)(res >> 32);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);

int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
	u32 mem[SK_MEMINFO_VARS];

	sk_get_meminfo(sk, mem);

	return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);

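/*
 * Report the classic BPF program attached to the socket as a netlink
 * attribute.  When the requester may not see the filter, a zero-length
 * attribute is reserved instead and no program bytes are copied.
 */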
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
			     struct sk_buff *skb, int attrtype)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	struct nlattr *attr;
	unsigned int flen;
	int err = 0;

	if (!may_report_filterinfo) {
		nla_reserve(skb, attrtype, 0);
		return 0;
	}

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (!filter)
		goto out;

	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	flen = bpf_classic_proglen(fprog);

	attr = nla_reserve(skb, attrtype, flen);
	if (attr == NULL) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(nla_data(attr), fprog->filter, flen);
out:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(sock_diag_put_filterinfo);

struct broadcast_sk {
	struct sock *sk;
	struct work_struct work;
};

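/*
 * Size estimate for a destroy notification: the inet_diag_msg header plus
 * a one-byte protocol attribute and a 64-bit-aligned tcp_info attribute.
 */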
static size_t sock_diag_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
	       + nla_total_size(sizeof(u8))
	       + nla_total_size_64bit(sizeof(struct tcp_info)));
}

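/*
 * Deferred half of the destroy broadcast: build the diag message via the
 * family handler's ->get_info() and multicast it to the destroy group,
 * then finish destroying the socket and free the work item.
 */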
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
	struct broadcast_sk *bsk =
		container_of(work, struct broadcast_sk, work);
	struct sock *sk = bsk->sk;
	const struct sock_diag_handler *hndl;
	struct sk_buff *skb;
	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
	int err = -1;

	WARN_ON(group == SKNLGRP_NONE);

	skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out;

	mutex_lock(&sock_diag_table_mutex);
	hndl = sock_diag_handlers[sk->sk_family];
	if (hndl && hndl->get_info)
		err = hndl->get_info(skb, sk);
	mutex_unlock(&sock_diag_table_mutex);

	if (!err)
		nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
				GFP_KERNEL);
	else
		kfree_skb(skb);
out:
	sk_destruct(sk);
	kfree(bsk);
}

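/*
 * Often called from atomic context, so the notification itself is punted
 * to broadcast_wq; if the work item cannot be allocated, destroy the
 * socket immediately without a broadcast.
 */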
void sock_diag_broadcast_destroy(struct sock *sk)
{
	struct broadcast_sk *bsk =
		kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);
	if (!bsk)
		return sk_destruct(sk);
	bsk->sk = sk;
	INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
	queue_work(broadcast_wq, &bsk->work);
}

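/*
 * Compat hook for legacy TCPDIAG/DCCPDIAG requests; inet_diag installs
 * its handler here when it loads.
 */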
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = fn;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);

void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);

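/*
 * Family diag handlers register here; only one handler per address family
 * is allowed, and lookups are serialized by sock_diag_table_mutex.
 */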
int sock_diag_register(const struct sock_diag_handler *hndl)
{
	int err = 0;

	if (hndl->family >= AF_MAX)
		return -EINVAL;

	mutex_lock(&sock_diag_table_mutex);
	if (sock_diag_handlers[hndl->family])
		err = -EBUSY;
	else
		sock_diag_handlers[hndl->family] = hndl;
	mutex_unlock(&sock_diag_table_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(sock_diag_register);

void sock_diag_unregister(const struct sock_diag_handler *hndl)
{
	int family = hndl->family;

	if (family >= AF_MAX)
		return;

	mutex_lock(&sock_diag_table_mutex);
	BUG_ON(sock_diag_handlers[family] != hndl);
	sock_diag_handlers[family] = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);

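/*
 * Common dispatcher for SOCK_DIAG_BY_FAMILY and SOCK_DESTROY: validate the
 * request, clamp sdiag_family against speculative out-of-bounds use, try to
 * autoload the family's diag module, then call the handler under
 * sock_diag_table_mutex.
 */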
static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int err;
	struct sock_diag_req *req = nlmsg_data(nlh);
	const struct sock_diag_handler *hndl;

	if (nlmsg_len(nlh) < sizeof(*req))
		return -EINVAL;

	if (req->sdiag_family >= AF_MAX)
		return -EINVAL;
	req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);

	if (sock_diag_handlers[req->sdiag_family] == NULL)
		sock_load_diag_module(req->sdiag_family, 0);

	mutex_lock(&sock_diag_table_mutex);
	hndl = sock_diag_handlers[req->sdiag_family];
	if (hndl == NULL)
		err = -ENOENT;
	else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
		err = hndl->dump(skb, nlh);
	else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
		err = hndl->destroy(skb, nlh);
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&sock_diag_table_mutex);

	return err;
}

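/*
 * Top-level netlink message handler.  Legacy TCPDIAG/DCCPDIAG requests are
 * forwarded to the inet compat hook (loading it on demand); the generic
 * commands go through __sock_diag_cmd().
 */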
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int ret;

	switch (nlh->nlmsg_type) {
	case TCPDIAG_GETSOCK:
	case DCCPDIAG_GETSOCK:
		if (inet_rcv_compat == NULL)
			sock_load_diag_module(AF_INET, 0);

		mutex_lock(&sock_diag_table_mutex);
		if (inet_rcv_compat != NULL)
			ret = inet_rcv_compat(skb, nlh);
		else
			ret = -EOPNOTSUPP;
		mutex_unlock(&sock_diag_table_mutex);

		return ret;
	case SOCK_DIAG_BY_FAMILY:
	case SOCK_DESTROY:
		return __sock_diag_cmd(skb, nlh);
	default:
		return -EINVAL;
	}
}

static DEFINE_MUTEX(sock_diag_mutex);

static void sock_diag_rcv(struct sk_buff *skb)
{
	mutex_lock(&sock_diag_mutex);
	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
	mutex_unlock(&sock_diag_mutex);
}

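/*
 * When a listener joins one of the INET/INET6 destroy multicast groups,
 * make sure the matching diag module is loaded so destroy events can
 * actually be generated.
 */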
static int sock_diag_bind(struct net *net, int group)
{
	switch (group) {
	case SKNLGRP_INET_TCP_DESTROY:
	case SKNLGRP_INET_UDP_DESTROY:
		if (!sock_diag_handlers[AF_INET])
			sock_load_diag_module(AF_INET, 0);
		break;
	case SKNLGRP_INET6_TCP_DESTROY:
	case SKNLGRP_INET6_UDP_DESTROY:
		if (!sock_diag_handlers[AF_INET6])
			sock_load_diag_module(AF_INET6, 0);
		break;
	}
	return 0;
}

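/*
 * Administratively close a socket on behalf of a SOCK_DESTROY request.
 * Requires CAP_NET_ADMIN in the socket's user namespace and a protocol
 * that implements ->diag_destroy().
 */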
int sock_diag_destroy(struct sock *sk, int err)
{
	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!sk->sk_prot->diag_destroy)
		return -EOPNOTSUPP;

	return sk->sk_prot->diag_destroy(sk, err);
}
EXPORT_SYMBOL_GPL(sock_diag_destroy);

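/*
 * Per-namespace setup: create the NETLINK_SOCK_DIAG kernel socket.
 * NL_CFG_F_NONROOT_RECV lets unprivileged listeners subscribe to the
 * destroy multicast groups.
 */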
static int __net_init diag_net_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups = SKNLGRP_MAX,
		.input = sock_diag_rcv,
		.bind = sock_diag_bind,
		.flags = NL_CFG_F_NONROOT_RECV,
	};

	net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
	return net->diag_nlsk == NULL ? -ENOMEM : 0;
}

static void __net_exit diag_net_exit(struct net *net)
{
	netlink_kernel_release(net->diag_nlsk);
	net->diag_nlsk = NULL;
}

static struct pernet_operations diag_net_ops = {
	.init = diag_net_init,
	.exit = diag_net_exit,
};

static int __init sock_diag_init(void)
{
	broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
	BUG_ON(!broadcast_wq);
	return register_pernet_subsys(&diag_net_ops);
}
device_initcall(sock_diag_init);