0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include <linux/uaccess.h>
0019 #include <linux/types.h>
0020 #include <linux/fcntl.h>
0021 #include <linux/socket.h>
0022 #include <linux/sockios.h>
0023 #include <linux/in.h>
0024 #include <linux/errno.h>
0025 #include <linux/timer.h>
0026 #include <linux/mm.h>
0027 #include <linux/inet.h>
0028 #include <linux/netdevice.h>
0029 #include <net/snmp.h>
0030 #include <net/ip.h>
0031 #include <net/icmp.h>
0032 #include <net/protocol.h>
0033 #include <linux/skbuff.h>
0034 #include <linux/proc_fs.h>
0035 #include <linux/export.h>
0036 #include <net/sock.h>
0037 #include <net/ping.h>
0038 #include <net/udp.h>
0039 #include <net/route.h>
0040 #include <net/inet_common.h>
0041 #include <net/checksum.h>
0042
0043 #if IS_ENABLED(CONFIG_IPV6)
0044 #include <linux/in6.h>
0045 #include <linux/icmpv6.h>
0046 #include <net/addrconf.h>
0047 #include <net/ipv6.h>
0048 #include <net/transp_v6.h>
0049 #endif
0050
/*
 * Hash table of all ping sockets, keyed by ICMP echo ident.
 * Mutations are serialized by @lock; lookups walk the nulls lists
 * under RCU (sockets are freed via SOCK_RCU_FREE, see ping_get_port()).
 */
struct ping_table {
	struct hlist_nulls_head hash[PING_HTABLE_SIZE];
	spinlock_t lock;
};
0055
static struct ping_table ping_table;

/* IPv6 callbacks; populated when the ipv6 ping module registers. */
struct pingv6_ops pingv6_ops;
EXPORT_SYMBOL_GPL(pingv6_ops);

/* Last ident handed out by automatic (ident == 0) selection. */
static u16 ping_port_rover;
0061
/* Hash an ICMP ident into a table bucket, mixed per-netns. */
static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask)
{
	u32 res = (num + net_hash_mix(net)) & mask;

	pr_debug("hash(%u) = %u\n", num, res);
	return res;
}
/* NOTE(review): this exports ping_hash() (defined further down), not
 * ping_hashfn; the placement is odd but legal since ping_hash is
 * declared in <net/ping.h>.
 */
EXPORT_SYMBOL_GPL(ping_hash);
0070
0071 static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
0072 struct net *net, unsigned int num)
0073 {
0074 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
0075 }
0076
/*
 * Bind a ping socket to an ICMP echo ident (the "port").
 *
 * With @ident == 0 an unused ident is picked via a rover, much like UDP
 * autobind.  A non-zero ident may be shared only when both the existing
 * and the new socket have sk_reuse set.
 *
 * Returns 0 on success, 1 on failure (caller maps this to -EADDRINUSE).
 */
int ping_get_port(struct sock *sk, unsigned short ident)
{
	struct hlist_nulls_node *node;
	struct hlist_nulls_head *hlist;
	struct inet_sock *isk, *isk2;
	struct sock *sk2 = NULL;

	isk = inet_sk(sk);
	spin_lock(&ping_table.lock);
	if (ident == 0) {
		u32 i;
		u16 result = ping_port_rover + 1;

		for (i = 0; i < (1L << 16); i++, result++) {
			if (!result)
				result++; /* 0 means "pick for me", skip it */
			hlist = ping_hashslot(&ping_table, sock_net(sk),
					      result);
			ping_portaddr_for_each_entry(sk2, node, hlist) {
				isk2 = inet_sk(sk2);

				if (isk2->inet_num == result)
					goto next_port;
			}

			/* found an unused ident */
			ping_port_rover = ident = result;
			break;
next_port:
			;
		}
		if (i >= (1L << 16))
			goto fail;
	} else {
		hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
		ping_portaddr_for_each_entry(sk2, node, hlist) {
			isk2 = inet_sk(sk2);

			/* sharing an ident requires sk_reuse on both
			 * sockets; note this checks sk_reuse rather than
			 * any per-ident reuseaddr bookkeeping
			 */
			if ((isk2->inet_num == ident) &&
			    (sk2 != sk) &&
			    (!sk2->sk_reuse || !sk->sk_reuse))
				goto fail;
		}
	}

	pr_debug("found port/ident = %d\n", ident);
	isk->inet_num = ident;
	if (sk_unhashed(sk)) {
		pr_debug("was not hashed\n");
		/* hold a ref for the hash table; freed via RCU on unhash */
		sock_hold(sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, hlist);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	}
	spin_unlock(&ping_table.lock);
	return 0;

fail:
	spin_unlock(&ping_table.lock);
	return 1;
}
EXPORT_SYMBOL_GPL(ping_get_port);
0143
/*
 * Never called for ping sockets: binding goes through the proto
 * .get_port hook (ping_get_port()), which does the hashing itself.
 */
int ping_hash(struct sock *sk)
{
	pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
	BUG(); /* "Please do not use this function." */

	return 0;
}
0151
/* Remove a ping socket from the ident hash and clear its binding. */
void ping_unhash(struct sock *sk)
{
	struct inet_sock *isk = inet_sk(sk);

	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
	spin_lock(&ping_table.lock);
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		sock_put(sk); /* drop the ref taken in ping_get_port() */
		isk->inet_num = 0;
		isk->inet_sport = 0;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	}
	spin_unlock(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
0168
0169
/*
 * Find the ping socket bound to @ident that matches the addressing of
 * @skb (v4 or v6 destination address, bound device).  The nulls chain
 * is traversed with RCU primitives; callers must be in an RCU
 * read-side critical section.
 */
static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
{
	struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
	struct sock *sk = NULL;
	struct inet_sock *isk;
	struct hlist_nulls_node *hnode;
	int dif, sdif;

	if (skb->protocol == htons(ETH_P_IP)) {
		dif = inet_iif(skb);
		sdif = inet_sdif(skb);
		pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
			 (int)ident, &ip_hdr(skb)->daddr, dif);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		dif = inet6_iif(skb);
		sdif = inet6_sdif(skb);
		pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
			 (int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
	} else {
		return NULL;
	}

	ping_portaddr_for_each_entry(sk, hnode, hslot) {
		isk = inet_sk(sk);

		pr_debug("iterate\n");
		if (isk->inet_num != ident)
			continue;

		if (skb->protocol == htons(ETH_P_IP) &&
		    sk->sk_family == AF_INET) {
			pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
				 (int) isk->inet_num, &isk->inet_rcv_saddr,
				 sk->sk_bound_dev_if);

			/* bound to a specific local address? must match */
			if (isk->inet_rcv_saddr &&
			    isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
				continue;
#if IS_ENABLED(CONFIG_IPV6)
		} else if (skb->protocol == htons(ETH_P_IPV6) &&
			   sk->sk_family == AF_INET6) {

			pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
				 (int) isk->inet_num,
				 &sk->sk_v6_rcv_saddr,
				 sk->sk_bound_dev_if);

			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
			    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
					     &ipv6_hdr(skb)->daddr))
				continue;
#endif
		} else {
			continue;
		}

		/* device-bound socket: must match the ingress (or sdif) dev */
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
		    sk->sk_bound_dev_if != sdif)
			continue;

		goto exit;
	}

	sk = NULL;
exit:

	return sk;
}
0240
/*
 * Read a consistent snapshot of the net.ipv4.ping_group_range sysctl
 * under its seqlock (retries if a concurrent writer raced us).
 */
static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
					  kgid_t *high)
{
	kgid_t *data = net->ipv4.ping_group_range.range;
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);

		*low = data[0];
		*high = data[1];
	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}
0254
0255
/*
 * Socket init hook: permit creation only if the caller's effective gid
 * or one of its supplementary groups falls inside the ping_group_range
 * sysctl.  Returns 0 if allowed, -EACCES otherwise.
 */
int ping_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	kgid_t group = current_egid();
	struct group_info *group_info;
	int i;
	kgid_t low, high;
	int ret = 0;

	/* v6 ping sockets are always v6-only */
	if (sk->sk_family == AF_INET6)
		sk->sk_ipv6only = 1;

	inet_get_ping_group_range_net(net, &low, &high);
	if (gid_lte(low, group) && gid_lte(group, high))
		return 0;

	/* egid not in range; check the supplementary groups too */
	group_info = get_current_groups();
	for (i = 0; i < group_info->ngroups; i++) {
		kgid_t gid = group_info->gid[i];

		if (gid_lte(low, gid) && gid_lte(gid, high))
			goto out_release_group;
	}

	ret = -EACCES;

out_release_group:
	put_group_info(group_info);
	return ret;
}
EXPORT_SYMBOL_GPL(ping_init_sock);
0287
/* close() hook: generic inet teardown; @timeout is unused here. */
void ping_close(struct sock *sk, long timeout)
{
	pr_debug("ping_close(sk=%p,sk->num=%u)\n",
		 inet_sk(sk), inet_sk(sk)->inet_num);
	pr_debug("isk->refcnt = %d\n", refcount_read(&sk->sk_refcnt));

	sk_common_release(sk);
}
EXPORT_SYMBOL_GPL(ping_close);
0297
0298
/*
 * Validate the address passed to bind() for family, length and local
 * reachability.  Leaves the socket untouched except for pinning
 * sk_bound_dev_if when a scoped IPv6 address is given.
 * Returns 0 or a negative errno.
 */
static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
				struct sockaddr *uaddr, int addr_len)
{
	struct net *net = sock_net(sk);
	if (sk->sk_family == AF_INET) {
		struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
		u32 tb_id = RT_TABLE_LOCAL;
		int chk_addr_ret;

		if (addr_len < sizeof(*addr))
			return -EINVAL;

		/* AF_UNSPEC is tolerated for INADDR_ANY, like inet_bind() */
		if (addr->sin_family != AF_INET &&
		    !(addr->sin_family == AF_UNSPEC &&
		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
			return -EAFNOSUPPORT;

		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));

		if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
			return 0;

		/* honour the VRF table when bound to an l3mdev */
		tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
		chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

		if (chk_addr_ret == RTN_MULTICAST ||
		    chk_addr_ret == RTN_BROADCAST ||
		    (chk_addr_ret != RTN_LOCAL &&
		     !inet_can_nonlocal_bind(net, isk)))
			return -EADDRNOTAVAIL;

#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_family == AF_INET6) {
		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
		int addr_type, scoped, has_addr;
		struct net_device *dev = NULL;

		if (addr_len < sizeof(*addr))
			return -EINVAL;

		if (addr->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));

		addr_type = ipv6_addr_type(&addr->sin6_addr);
		scoped = __ipv6_addr_needs_scope_id(addr_type);
		/* only unicast (or any); link-local needs a scope id */
		if ((addr_type != IPV6_ADDR_ANY &&
		     !(addr_type & IPV6_ADDR_UNICAST)) ||
		    (scoped && !addr->sin6_scope_id))
			return -EINVAL;

		rcu_read_lock();
		if (addr->sin6_scope_id) {
			dev = dev_get_by_index_rcu(net, addr->sin6_scope_id);
			if (!dev) {
				rcu_read_unlock();
				return -ENODEV;
			}
		}

		if (!dev && sk->sk_bound_dev_if) {
			dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
			if (!dev) {
				rcu_read_unlock();
				return -ENODEV;
			}
		}
		has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
						    scoped);
		rcu_read_unlock();

		if (!(ipv6_can_nonlocal_bind(net, isk) || has_addr ||
		      addr_type == IPV6_ADDR_ANY))
			return -EADDRNOTAVAIL;

		/* a scoped address implicitly binds the device too */
		if (scoped)
			sk->sk_bound_dev_if = addr->sin6_scope_id;
#endif
	} else {
		return -EAFNOSUPPORT;
	}
	return 0;
}
0385
/* Record the freshly-bound local address on the socket (per family). */
static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
{
	if (saddr->sa_family == AF_INET) {
		struct inet_sock *isk = inet_sk(sk);
		struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
		isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (saddr->sa_family == AF_INET6) {
		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
		struct ipv6_pinfo *np = inet6_sk(sk);
		sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr;
#endif
	}
}
0400
0401
0402
0403
0404
0405
/*
 * bind() for ping sockets (both v4 and v6): validate the address,
 * acquire the ident via ping_get_port(), then record the local address
 * and userlock flags.  Reading the port through sockaddr_in works for
 * both families since sin_port and sin6_port share the same offset.
 */
int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *isk = inet_sk(sk);
	unsigned short snum;
	int err;
	int dif = sk->sk_bound_dev_if;

	err = ping_check_bind_addr(sk, isk, uaddr, addr_len);
	if (err)
		return err;

	lock_sock(sk);

	err = -EINVAL;
	if (isk->inet_num != 0)
		goto out;	/* already bound */

	err = -EADDRINUSE;
	snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port);
	if (ping_get_port(sk, snum) != 0) {
		/* restore sk_bound_dev_if possibly changed by
		 * ping_check_bind_addr() for a scoped v6 address */
		sk->sk_bound_dev_if = dif;
		goto out;
	}
	ping_set_saddr(sk, uaddr);

	pr_debug("after bind(): num = %hu, dif = %d\n",
		 isk->inet_num,
		 sk->sk_bound_dev_if);

	err = 0;
	if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
#endif

	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	isk->inet_sport = htons(isk->inet_num);
	isk->inet_daddr = 0;
	isk->inet_dport = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
#endif

	sk_dst_reset(sk);
out:
	release_sock(sk);
	pr_debug("ping_v4_bind -> %d\n", err);
	return err;
}
EXPORT_SYMBOL_GPL(ping_bind);
0462
0463
0464
0465
0466
0467 static inline int ping_supported(int family, int type, int code)
0468 {
0469 return (family == AF_INET && type == ICMP_ECHO && code == 0) ||
0470 (family == AF_INET && type == ICMP_EXT_ECHO && code == 0) ||
0471 (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0) ||
0472 (family == AF_INET6 && type == ICMPV6_EXT_ECHO_REQUEST && code == 0);
0473 }
0474
0475
0476
0477
0478
0479
/*
 * Handle an incoming ICMP error that quotes an echo request we sent.
 * Finds the owning socket by the quoted ident, maps the outer ICMP
 * type/code to an errno, optionally queues the error for MSG_ERRQUEUE,
 * and signals it via sk_error_report().
 * @offset: offset of the quoted (inner) ICMP header within skb->data.
 */
void ping_err(struct sk_buff *skb, int offset, u32 info)
{
	int family;
	struct icmphdr *icmph;
	struct inet_sock *inet_sock;
	int type;
	int code;
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	int harderr;
	int err;

	if (skb->protocol == htons(ETH_P_IP)) {
		family = AF_INET;
		type = icmp_hdr(skb)->type;
		code = icmp_hdr(skb)->code;
		icmph = (struct icmphdr *)(skb->data + offset);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		family = AF_INET6;
		type = icmp6_hdr(skb)->icmp6_type;
		code = icmp6_hdr(skb)->icmp6_code;
		icmph = (struct icmphdr *) (skb->data + offset);
	} else {
		BUG();
	}

	/* only errors quoting one of our echo requests are interesting */
	if (!ping_supported(family, icmph->type, icmph->code))
		return;

	pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n",
		 skb->protocol, type, code, ntohs(icmph->un.echo.id),
		 ntohs(icmph->un.echo.sequence));

	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
	if (!sk) {
		pr_debug("no socket, dropping\n");
		return;	/* No socket for error */
	}
	pr_debug("err on socket %p\n", sk);

	err = 0;
	harderr = 0;
	inet_sock = inet_sk(sk);

	if (skb->protocol == htons(ETH_P_IP)) {
		switch (type) {
		default:
		case ICMP_TIME_EXCEEDED:
			err = EHOSTUNREACH;
			break;
		case ICMP_SOURCE_QUENCH:
			/* Not a real error, but ping wants to see it.
			 * Report it with a fake errno.
			 */
			err = EREMOTEIO;
			break;
		case ICMP_PARAMETERPROB:
			err = EPROTO;
			harderr = 1;
			break;
		case ICMP_DEST_UNREACH:
			if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
				ipv4_sk_update_pmtu(skb, sk, info);
				if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
					err = EMSGSIZE;
					harderr = 1;
					break;
				}
				goto out;
			}
			err = EHOSTUNREACH;
			if (code <= NR_ICMP_UNREACH) {
				harderr = icmp_err_convert[code].fatal;
				err = icmp_err_convert[code].errno;
			}
			break;
		case ICMP_REDIRECT:
			/* see ICMP_SOURCE_QUENCH */
			ipv4_sk_redirect(skb, sk);
			err = EREMOTEIO;
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
#endif
	}

	/*
	 * Without IP_RECVERR only hard errors on a connected socket are
	 * reported; with it, the error is queued for MSG_ERRQUEUE.
	 */
	if ((family == AF_INET && !inet_sock->recverr) ||
	    (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		if (family == AF_INET) {
			ip_icmp_error(sk, skb, err, 0 /* no remote port */,
				      info, (u8 *)icmph);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
						   info, (u8 *)icmph);
#endif
		}
	}
	sk->sk_err = err;
	sk_error_report(sk);
out:
	return;
}
EXPORT_SYMBOL_GPL(ping_err);
0595
0596
0597
0598
0599
0600
/*
 * ip_append_data()/ip6_append_data() getfrag callback.  Copies user
 * payload from pfh->msg while accumulating a checksum in pfh->wcheck.
 * The first fragment leaves room for the ICMP header, filled in later
 * (for v4 by ping_v4_push_pending_frames()).
 */
int ping_getfrag(void *from, char *to,
		 int offset, int fraglen, int odd, struct sk_buff *skb)
{
	struct pingfakehdr *pfh = from;

	if (offset == 0) {
		/* reserve space for the ICMP header in the first chunk */
		fraglen -= sizeof(struct icmphdr);
		if (fraglen < 0)
			BUG();
		if (!csum_and_copy_from_iter_full(to + sizeof(struct icmphdr),
			    fraglen, &pfh->wcheck,
			    &pfh->msg->msg_iter))
			return -EFAULT;
	} else if (offset < sizeof(struct icmphdr)) {
		BUG();
	} else {
		if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck,
						  &pfh->msg->msg_iter))
			return -EFAULT;
	}

#if IS_ENABLED(CONFIG_IPV6)
	/* v6: stash the running sum on the skb and reset the accumulator;
	 * the final icmpv6 checksum (incl. pseudo-header) is presumably
	 * computed by the v6 output path — see the v6 sendmsg side.
	 */
	if (pfh->family == AF_INET6) {
		skb->csum = pfh->wcheck;
		skb->ip_summed = CHECKSUM_NONE;
		pfh->wcheck = 0;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(ping_getfrag);
0637
/*
 * Finalize the ICMP header: fold the header bytes into the accumulated
 * payload checksum, copy the completed header into the queued skb and
 * hand the datagram to the IP output path.
 */
static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
				       struct flowi4 *fl4)
{
	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);

	if (!skb)
		return 0;
	pfh->wcheck = csum_partial((char *)&pfh->icmph,
				   sizeof(struct icmphdr), pfh->wcheck);
	pfh->icmph.checksum = csum_fold(pfh->wcheck);
	memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
	skb->ip_summed = CHECKSUM_NONE;
	return ip_push_pending_frames(sk, fl4);
}
0652
/*
 * Family-independent sendmsg prologue: validates length and flags and
 * copies the user-supplied ICMP/ICMPv6 header into @user_icmph.
 * Only echo requests (per ping_supported()) may be sent.
 */
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
			void *user_icmph, size_t icmph_len)
{
	u8 type, code;

	/* must fit the 16-bit IP total-length field */
	if (len > 0xFFFF)
		return -EMSGSIZE;

	/* must contain at least the full ICMP header */
	if (len < icmph_len)
		return -EINVAL;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* fetch the user-supplied ICMP header */
	if (memcpy_from_msg(user_icmph, msg, icmph_len))
		return -EFAULT;

	if (family == AF_INET) {
		type = ((struct icmphdr *) user_icmph)->type;
		code = ((struct icmphdr *) user_icmph)->code;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (family == AF_INET6) {
		type = ((struct icmp6hdr *) user_icmph)->icmp6_type;
		code = ((struct icmp6hdr *) user_icmph)->icmp6_code;
#endif
	} else {
		BUG();
	}

	if (!ping_supported(family, type, code))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ping_common_sendmsg);
0698
/*
 * sendmsg() for AF_INET ping sockets: resolve the destination and IP
 * options, route the packet, then build and transmit the echo request.
 * The echo ident is always overridden with the socket's bound one.
 * Returns @len on success or a negative errno.
 */
static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct net *net = sock_net(sk);
	struct flowi4 fl4;
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct icmphdr user_icmph;
	struct pingfakehdr pfh;
	struct rtable *rt = NULL;
	struct ip_options_data opt_copy;
	int free = 0;
	__be32 saddr, daddr, faddr;
	u8 tos;
	int err;

	pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);

	err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph,
				  sizeof(user_icmph));
	if (err)
		return err;

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET)
			return -EAFNOSUPPORT;
		daddr = usin->sin_addr.s_addr;
		/* no remote port */
	} else {
		/* unconnected socket needs an explicit destination */
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		/* no remote port */
	}

	ipcm_init_sk(&ipc, inet);

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, false);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
	}
	if (!ipc.opt) {
		/* no per-message options: copy the socket's IP options */
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	/* source routing: route towards the first hop instead */
	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
			   sk->sk_uid);

	fl4.fl4_icmp_type = user_icmph.type;
	fl4.fl4_icmp_code = user_icmph.code;

	security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		if (err == -ENETUNREACH)
			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
		goto out;
	}

	/* broadcast destinations require SO_BROADCAST */
	err = -EACCES;
	if ((rt->rt_flags & RTCF_BROADCAST) &&
	    !sock_flag(sk, SOCK_BROADCAST))
		goto out;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	if (!ipc.addr)
		ipc.addr = fl4.daddr;

	lock_sock(sk);

	/* the user's ident is replaced by the socket's own; only type,
	 * code and sequence are taken from the user header */
	pfh.icmph.type = user_icmph.type;
	pfh.icmph.code = user_icmph.code;
	pfh.icmph.checksum = 0;
	pfh.icmph.un.echo.id = inet->inet_sport;
	pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
	pfh.msg = msg;
	pfh.wcheck = 0;
	pfh.family = AF_INET;

	err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
			     0, &ipc, &rt, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);
	else
		err = ping_v4_push_pending_frames(sk, &pfh, &fl4);
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err) {
		icmp_out_count(sock_net(sk), user_icmph.type);
		return len;
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4.daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
0857
/*
 * recvmsg() shared by v4 and v6 ping sockets: dequeue one reply, copy
 * it (possibly truncated) to the user, and fill in the source address
 * and any requested ancillary data.
 * Returns bytes copied or a negative errno.
 */
int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		 int *addr_len)
{
	struct inet_sock *isk = inet_sk(sk);
	int family = sk->sk_family;
	struct sk_buff *skb;
	int copied, err;

	pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	if (flags & MSG_ERRQUEUE)
		return inet_recv_error(sk, msg, len, addr_len);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	/* Don't bother checking the checksum */
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address and add cmsg data. */
	if (family == AF_INET) {
		DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = 0 /* ICMP has no ports */;
			sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			*addr_len = sizeof(*sin);
		}

		if (isk->cmsg_flags)
			ip_cmsg_recv(msg, skb);

#if IS_ENABLED(CONFIG_IPV6)
	} else if (family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);
		struct ipv6hdr *ip6 = ipv6_hdr(skb);
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);

		if (sin6) {
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = 0;
			sin6->sin6_addr = ip6->saddr;
			sin6->sin6_flowinfo = 0;
			if (np->sndflow)
				sin6->sin6_flowinfo = ip6_flowinfo(ip6);
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
			*addr_len = sizeof(*sin6);
		}

		if (inet6_sk(sk)->rxopt.all)
			pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
		/* a v6 socket may also receive v4 replies (dual use of the
		 * table); pick the cmsg path that matches the packet */
		if (skb->protocol == htons(ETH_P_IPV6) &&
		    inet6_sk(sk)->rxopt.all)
			pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
		else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
			ip_cmsg_recv(msg, skb);
#endif
	} else {
		BUG();
	}

	err = copied;

done:
	skb_free_datagram(sk, skb);
out:
	pr_debug("ping_recvmsg -> %d\n", err);
	return err;
}
EXPORT_SYMBOL_GPL(ping_recvmsg);
0947
/*
 * Queue one skb on the socket's receive queue; consumes @skb (frees it
 * with the drop reason on failure).  Returns SKB_NOT_DROPPED_YET (0)
 * on success.
 */
static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
						 struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
		 inet_sk(sk), inet_sk(sk)->inet_num, skb);
	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
		kfree_skb_reason(skb, reason);
		pr_debug("ping_queue_rcv_skb -> failed\n");
		return reason;
	}
	return SKB_NOT_DROPPED_YET;
}
0962
0963 int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
0964 {
0965 return __ping_queue_rcv_skb(sk, skb) ? -1 : 0;
0966 }
0967 EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
0968
0969
0970
0971
0972
0973
/*
 * Deliver an incoming echo reply to the matching ping socket.  Called
 * from the ICMP receive path; validation of the ICMP header is
 * presumed done by the caller.  The skb is cloned for delivery, so the
 * caller retains ownership of the original.
 */
enum skb_drop_reason ping_rcv(struct sk_buff *skb)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NO_SOCKET;
	struct sock *sk;
	struct net *net = dev_net(skb->dev);
	struct icmphdr *icmph = icmp_hdr(skb);

	pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
		 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* Push data back so skb->data points at the ICMP header again */
	skb_push(skb, skb->data - (u8 *)icmph);

	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
	if (sk) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		pr_debug("rcv on socket %p\n", sk);
		if (skb2)
			reason = __ping_queue_rcv_skb(sk, skb2);
		else
			reason = SKB_DROP_REASON_NOMEM;
	}

	if (reason)
		pr_debug("no socket, dropping\n");

	return reason;
}
EXPORT_SYMBOL_GPL(ping_rcv);
1006
/* proto ops for AF_INET ping sockets; the v6 variant lives in ipv6. */
struct proto ping_prot = {
	.name =		"PING",
	.owner =	THIS_MODULE,
	.init =		ping_init_sock,
	.close =	ping_close,
	.connect =	ip4_datagram_connect,
	.disconnect =	__udp_disconnect,
	.setsockopt =	ip_setsockopt,
	.getsockopt =	ip_getsockopt,
	.sendmsg =	ping_v4_sendmsg,
	.recvmsg =	ping_recvmsg,
	.bind =		ping_bind,
	.backlog_rcv =	ping_queue_rcv_skb,
	.release_cb =	ip4_datagram_release_cb,
	.hash =		ping_hash,	/* BUG()s: binding uses get_port */
	.unhash =	ping_unhash,
	.get_port =	ping_get_port,
	.put_port =	ping_unhash,
	.obj_size =	sizeof(struct inet_sock),
};
EXPORT_SYMBOL(ping_prot);
1028
1029 #ifdef CONFIG_PROC_FS
1030
/*
 * /proc iterator: first socket of state->family in this netns,
 * scanning buckets from @start.  Runs under the RCU read lock taken in
 * ping_seq_start().
 */
static struct sock *ping_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
	     ++state->bucket) {
		struct hlist_nulls_node *node;
		struct hlist_nulls_head *hslot;

		hslot = &ping_table.hash[state->bucket];

		if (hlist_nulls_empty(hslot))
			continue;

		sk_nulls_for_each(sk, node, hslot) {
			if (net_eq(sock_net(sk), net) &&
			    sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}
1057
/*
 * /proc iterator: next socket in the same netns, falling through to the
 * next bucket when the chain is exhausted.
 * NOTE(review): unlike ping_get_first(), the in-chain walk does not
 * re-check sk_family — presumably acceptable because mixed-family
 * chains are tolerated in the output; confirm before relying on it.
 */
static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
{
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_nulls_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net)));

	if (!sk)
		return ping_get_first(seq, state->bucket + 1);
	return sk;
}
1071
/* Return the pos'th matching socket, or NULL if there are fewer. */
static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = ping_get_first(seq, 0);

	if (sk)
		while (pos && (sk = ping_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
1081
/* seq_file start: take the RCU read lock and position the iterator. */
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
	__acquires(RCU)
{
	struct ping_iter_state *state = seq->private;
	state->bucket = 0;
	state->family = family;

	rcu_read_lock();

	/* *pos == 0 yields the header token; data rows start at 1 */
	return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(ping_seq_start);
1094
/* AF_INET specialization of ping_seq_start() for /proc/net/icmp. */
static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos)
{
	return ping_seq_start(seq, pos, AF_INET);
}
1099
/* seq_file next: advance past the header token or the current socket. */
void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = ping_get_idx(seq, 0);
	else
		sk = ping_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL_GPL(ping_seq_next);
1113
/* seq_file stop: drop the RCU read lock taken in ping_seq_start(). */
void ping_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ping_seq_stop);
1120
/* Emit one /proc/net/icmp row in the classic inet seq format. */
static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
				int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0, /* tr, tm->when, retrnsmt: meaningless for ping */
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}
1141
/* Show one row (or the header) of /proc/net/icmp. */
static int ping_v4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);	/* pad every row to a fixed width */
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " sl local_address rem_address st tx_queue "
			   "rx_queue tr tm->when retrnsmt uid timeout "
			   "inode ref pointer drops");
	else {
		struct ping_iter_state *state = seq->private;

		ping_v4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
1157
/* seq_file operations backing /proc/net/icmp. */
static const struct seq_operations ping_v4_seq_ops = {
	.start =	ping_v4_seq_start,
	.show =		ping_v4_seq_show,
	.next =		ping_seq_next,
	.stop =		ping_seq_stop,
};
1164
1165 static int __net_init ping_v4_proc_init_net(struct net *net)
1166 {
1167 if (!proc_create_net("icmp", 0444, net->proc_net, &ping_v4_seq_ops,
1168 sizeof(struct ping_iter_state)))
1169 return -ENOMEM;
1170 return 0;
1171 }
1172
/* Per-netns teardown: remove /proc/net/icmp. */
static void __net_exit ping_v4_proc_exit_net(struct net *net)
{
	remove_proc_entry("icmp", net->proc_net);
}
1177
/* pernet hooks wiring the /proc/net/icmp lifetime to each netns. */
static struct pernet_operations ping_v4_net_ops = {
	.init = ping_v4_proc_init_net,
	.exit = ping_v4_proc_exit_net,
};
1182
/* Register the per-netns /proc/net/icmp handlers. */
int __init ping_proc_init(void)
{
	return register_pernet_subsys(&ping_v4_net_ops);
}
1187
/* Unregister the per-netns /proc/net/icmp handlers. */
void ping_proc_exit(void)
{
	unregister_pernet_subsys(&ping_v4_net_ops);
}
1192
1193 #endif
1194
1195 void __init ping_init(void)
1196 {
1197 int i;
1198
1199 for (i = 0; i < PING_HTABLE_SIZE; i++)
1200 INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i);
1201 spin_lock_init(&ping_table.lock);
1202 }