0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038 #include <linux/rhashtable.h>
0039 #include <linux/sched/signal.h>
0040
0041 #include "core.h"
0042 #include "name_table.h"
0043 #include "node.h"
0044 #include "link.h"
0045 #include "name_distr.h"
0046 #include "socket.h"
0047 #include "bcast.h"
0048 #include "netlink.h"
0049 #include "group.h"
0050 #include "trace.h"
0051
0052 #define NAGLE_START_INIT 4
0053 #define NAGLE_START_MAX 1024
0054 #define CONN_TIMEOUT_DEFAULT 8000
0055 #define CONN_PROBING_INTV msecs_to_jiffies(3600000)
0056 #define TIPC_MAX_PORT 0xffffffff
0057 #define TIPC_MIN_PORT 1
0058 #define TIPC_ACK_RATE 4
0059
0060 enum {
0061 TIPC_LISTEN = TCP_LISTEN,
0062 TIPC_ESTABLISHED = TCP_ESTABLISHED,
0063 TIPC_OPEN = TCP_CLOSE,
0064 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
0065 TIPC_CONNECTING = TCP_SYN_SENT,
0066 };
0067
0068 struct sockaddr_pair {
0069 struct sockaddr_tipc sock;
0070 struct sockaddr_tipc member;
0071 };
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109 struct tipc_sock {
0110 struct sock sk;
0111 u32 max_pkt;
0112 u32 maxnagle;
0113 u32 portid;
0114 struct tipc_msg phdr;
0115 struct list_head cong_links;
0116 struct list_head publications;
0117 u32 pub_count;
0118 atomic_t dupl_rcvcnt;
0119 u16 conn_timeout;
0120 bool probe_unacked;
0121 u16 cong_link_cnt;
0122 u16 snt_unacked;
0123 u16 snd_win;
0124 u16 peer_caps;
0125 u16 rcv_unacked;
0126 u16 rcv_win;
0127 struct sockaddr_tipc peer;
0128 struct rhash_head node;
0129 struct tipc_mc_method mc_method;
0130 struct rcu_head rcu;
0131 struct tipc_group *group;
0132 u32 oneway;
0133 u32 nagle_start;
0134 u16 snd_backlog;
0135 u16 msg_acc;
0136 u16 pkt_cnt;
0137 bool expect_ack;
0138 bool nodelay;
0139 bool group_is_open;
0140 bool published;
0141 u8 conn_addrtype;
0142 };
0143
0144 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
0145 static void tipc_data_ready(struct sock *sk);
0146 static void tipc_write_space(struct sock *sk);
0147 static void tipc_sock_destruct(struct sock *sk);
0148 static int tipc_release(struct socket *sock);
0149 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
0150 bool kern);
0151 static void tipc_sk_timeout(struct timer_list *t);
0152 static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
0153 static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
0154 static int tipc_sk_leave(struct tipc_sock *tsk);
0155 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
0156 static int tipc_sk_insert(struct tipc_sock *tsk);
0157 static void tipc_sk_remove(struct tipc_sock *tsk);
0158 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
0159 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
0160 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
0161 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
0162
0163 static const struct proto_ops packet_ops;
0164 static const struct proto_ops stream_ops;
0165 static const struct proto_ops msg_ops;
0166 static struct proto tipc_proto;
0167 static const struct rhashtable_params tsk_rht_params;
0168
0169 static u32 tsk_own_node(struct tipc_sock *tsk)
0170 {
0171 return msg_prevnode(&tsk->phdr);
0172 }
0173
0174 static u32 tsk_peer_node(struct tipc_sock *tsk)
0175 {
0176 return msg_destnode(&tsk->phdr);
0177 }
0178
0179 static u32 tsk_peer_port(struct tipc_sock *tsk)
0180 {
0181 return msg_destport(&tsk->phdr);
0182 }
0183
0184 static bool tsk_unreliable(struct tipc_sock *tsk)
0185 {
0186 return msg_src_droppable(&tsk->phdr) != 0;
0187 }
0188
0189 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
0190 {
0191 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
0192 }
0193
0194 static bool tsk_unreturnable(struct tipc_sock *tsk)
0195 {
0196 return msg_dest_droppable(&tsk->phdr) != 0;
0197 }
0198
0199 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
0200 {
0201 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
0202 }
0203
0204 static int tsk_importance(struct tipc_sock *tsk)
0205 {
0206 return msg_importance(&tsk->phdr);
0207 }
0208
0209 static struct tipc_sock *tipc_sk(const struct sock *sk)
0210 {
0211 return container_of(sk, struct tipc_sock, sk);
0212 }
0213
0214 int tsk_set_importance(struct sock *sk, int imp)
0215 {
0216 if (imp > TIPC_CRITICAL_IMPORTANCE)
0217 return -EINVAL;
0218 msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
0219 return 0;
0220 }
0221
0222 static bool tsk_conn_cong(struct tipc_sock *tsk)
0223 {
0224 return tsk->snt_unacked > tsk->snd_win;
0225 }
0226
0227 static u16 tsk_blocks(int len)
0228 {
0229 return ((len / FLOWCTL_BLK_SZ) + 1);
0230 }
0231
0232
0233
0234
0235
0236 static u16 tsk_adv_blocks(int len)
0237 {
0238 return len / FLOWCTL_BLK_SZ / 4;
0239 }
0240
0241
0242
0243
0244
0245 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
0246 {
0247 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
0248 return ((msglen / FLOWCTL_BLK_SZ) + 1);
0249 return 1;
0250 }
0251
0252
0253
0254 static void tsk_set_nagle(struct tipc_sock *tsk)
0255 {
0256 struct sock *sk = &tsk->sk;
0257
0258 tsk->maxnagle = 0;
0259 if (sk->sk_type != SOCK_STREAM)
0260 return;
0261 if (tsk->nodelay)
0262 return;
0263 if (!(tsk->peer_caps & TIPC_NAGLE))
0264 return;
0265
0266 if (tsk->max_pkt == MAX_MSG_SIZE)
0267 tsk->maxnagle = 1500;
0268 else
0269 tsk->maxnagle = tsk->max_pkt;
0270 }
0271
0272
0273
0274
0275
0276
0277
0278 static void tsk_advance_rx_queue(struct sock *sk)
0279 {
0280 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
0281 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
0282 }
0283
0284
0285
0286 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
0287 {
0288 u32 selector;
0289 u32 dnode;
0290 u32 onode = tipc_own_addr(sock_net(sk));
0291
0292 if (!tipc_msg_reverse(onode, &skb, err))
0293 return;
0294
0295 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
0296 dnode = msg_destnode(buf_msg(skb));
0297 selector = msg_origport(buf_msg(skb));
0298 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
0299 }
0300
0301
0302
0303
0304
0305
0306
0307
0308 static void tsk_rej_rx_queue(struct sock *sk, int error)
0309 {
0310 struct sk_buff *skb;
0311
0312 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
0313 tipc_sk_respond(sk, skb, error);
0314 }
0315
0316 static bool tipc_sk_connected(struct sock *sk)
0317 {
0318 return sk->sk_state == TIPC_ESTABLISHED;
0319 }
0320
0321
0322
0323
0324
0325
0326 static bool tipc_sk_type_connectionless(struct sock *sk)
0327 {
0328 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
0329 }
0330
0331
0332
0333
0334
0335
0336 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
0337 {
0338 struct sock *sk = &tsk->sk;
0339 u32 self = tipc_own_addr(sock_net(sk));
0340 u32 peer_port = tsk_peer_port(tsk);
0341 u32 orig_node, peer_node;
0342
0343 if (unlikely(!tipc_sk_connected(sk)))
0344 return false;
0345
0346 if (unlikely(msg_origport(msg) != peer_port))
0347 return false;
0348
0349 orig_node = msg_orignode(msg);
0350 peer_node = tsk_peer_node(tsk);
0351
0352 if (likely(orig_node == peer_node))
0353 return true;
0354
0355 if (!orig_node && peer_node == self)
0356 return true;
0357
0358 if (!peer_node && orig_node == self)
0359 return true;
0360
0361 return false;
0362 }
0363
0364
0365
0366
0367
0368
0369
0370
0371 static int tipc_set_sk_state(struct sock *sk, int state)
0372 {
0373 int oldsk_state = sk->sk_state;
0374 int res = -EINVAL;
0375
0376 switch (state) {
0377 case TIPC_OPEN:
0378 res = 0;
0379 break;
0380 case TIPC_LISTEN:
0381 case TIPC_CONNECTING:
0382 if (oldsk_state == TIPC_OPEN)
0383 res = 0;
0384 break;
0385 case TIPC_ESTABLISHED:
0386 if (oldsk_state == TIPC_CONNECTING ||
0387 oldsk_state == TIPC_OPEN)
0388 res = 0;
0389 break;
0390 case TIPC_DISCONNECTING:
0391 if (oldsk_state == TIPC_CONNECTING ||
0392 oldsk_state == TIPC_ESTABLISHED)
0393 res = 0;
0394 break;
0395 }
0396
0397 if (!res)
0398 sk->sk_state = state;
0399
0400 return res;
0401 }
0402
0403 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
0404 {
0405 struct sock *sk = sock->sk;
0406 int err = sock_error(sk);
0407 int typ = sock->type;
0408
0409 if (err)
0410 return err;
0411 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
0412 if (sk->sk_state == TIPC_DISCONNECTING)
0413 return -EPIPE;
0414 else if (!tipc_sk_connected(sk))
0415 return -ENOTCONN;
0416 }
0417 if (!*timeout)
0418 return -EAGAIN;
0419 if (signal_pending(current))
0420 return sock_intr_errno(*timeout);
0421
0422 return 0;
0423 }
0424
0425 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
0426 ({ \
0427 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
0428 struct sock *sk_; \
0429 int rc_; \
0430 \
0431 while ((rc_ = !(condition_))) { \
0432 \
0433 smp_rmb(); \
0434 sk_ = (sock_)->sk; \
0435 rc_ = tipc_sk_sock_err((sock_), timeo_); \
0436 if (rc_) \
0437 break; \
0438 add_wait_queue(sk_sleep(sk_), &wait_); \
0439 release_sock(sk_); \
0440 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
0441 sched_annotate_sleep(); \
0442 lock_sock(sk_); \
0443 remove_wait_queue(sk_sleep(sk_), &wait_); \
0444 } \
0445 rc_; \
0446 })
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460 static int tipc_sk_create(struct net *net, struct socket *sock,
0461 int protocol, int kern)
0462 {
0463 const struct proto_ops *ops;
0464 struct sock *sk;
0465 struct tipc_sock *tsk;
0466 struct tipc_msg *msg;
0467
0468
0469 if (unlikely(protocol != 0))
0470 return -EPROTONOSUPPORT;
0471
0472 switch (sock->type) {
0473 case SOCK_STREAM:
0474 ops = &stream_ops;
0475 break;
0476 case SOCK_SEQPACKET:
0477 ops = &packet_ops;
0478 break;
0479 case SOCK_DGRAM:
0480 case SOCK_RDM:
0481 ops = &msg_ops;
0482 break;
0483 default:
0484 return -EPROTOTYPE;
0485 }
0486
0487
0488 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
0489 if (sk == NULL)
0490 return -ENOMEM;
0491
0492 tsk = tipc_sk(sk);
0493 tsk->max_pkt = MAX_PKT_DEFAULT;
0494 tsk->maxnagle = 0;
0495 tsk->nagle_start = NAGLE_START_INIT;
0496 INIT_LIST_HEAD(&tsk->publications);
0497 INIT_LIST_HEAD(&tsk->cong_links);
0498 msg = &tsk->phdr;
0499
0500
0501 sock->ops = ops;
0502 sock_init_data(sock, sk);
0503 tipc_set_sk_state(sk, TIPC_OPEN);
0504 if (tipc_sk_insert(tsk)) {
0505 sk_free(sk);
0506 pr_warn("Socket create failed; port number exhausted\n");
0507 return -EINVAL;
0508 }
0509
0510
0511 smp_mb();
0512
0513 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
0514 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
0515
0516 msg_set_origport(msg, tsk->portid);
0517 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
0518 sk->sk_shutdown = 0;
0519 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
0520 sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
0521 sk->sk_data_ready = tipc_data_ready;
0522 sk->sk_write_space = tipc_write_space;
0523 sk->sk_destruct = tipc_sock_destruct;
0524 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
0525 tsk->group_is_open = true;
0526 atomic_set(&tsk->dupl_rcvcnt, 0);
0527
0528
0529 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
0530 tsk->rcv_win = tsk->snd_win;
0531
0532 if (tipc_sk_type_connectionless(sk)) {
0533 tsk_set_unreturnable(tsk, true);
0534 if (sock->type == SOCK_DGRAM)
0535 tsk_set_unreliable(tsk, true);
0536 }
0537 __skb_queue_head_init(&tsk->mc_method.deferredq);
0538 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
0539 return 0;
0540 }
0541
0542 static void tipc_sk_callback(struct rcu_head *head)
0543 {
0544 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
0545
0546 sock_put(&tsk->sk);
0547 }
0548
0549
0550 static void __tipc_shutdown(struct socket *sock, int error)
0551 {
0552 struct sock *sk = sock->sk;
0553 struct tipc_sock *tsk = tipc_sk(sk);
0554 struct net *net = sock_net(sk);
0555 long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
0556 u32 dnode = tsk_peer_node(tsk);
0557 struct sk_buff *skb;
0558
0559
0560 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
0561 !tsk_conn_cong(tsk)));
0562
0563
0564 tipc_sk_push_backlog(tsk, false);
0565
0566 __skb_queue_purge(&sk->sk_write_queue);
0567
0568
0569 skb = skb_peek(&sk->sk_receive_queue);
0570 if (skb && TIPC_SKB_CB(skb)->bytes_read) {
0571 __skb_unlink(skb, &sk->sk_receive_queue);
0572 kfree_skb(skb);
0573 }
0574
0575
0576 if (tipc_sk_type_connectionless(sk)) {
0577 tsk_rej_rx_queue(sk, error);
0578 return;
0579 }
0580
0581 switch (sk->sk_state) {
0582 case TIPC_CONNECTING:
0583 case TIPC_ESTABLISHED:
0584 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
0585 tipc_node_remove_conn(net, dnode, tsk->portid);
0586
0587 skb = __skb_dequeue(&sk->sk_receive_queue);
0588 if (skb) {
0589 __skb_queue_purge(&sk->sk_receive_queue);
0590 tipc_sk_respond(sk, skb, error);
0591 break;
0592 }
0593 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
0594 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
0595 tsk_own_node(tsk), tsk_peer_port(tsk),
0596 tsk->portid, error);
0597 if (skb)
0598 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
0599 break;
0600 case TIPC_LISTEN:
0601
0602 tsk_rej_rx_queue(sk, error);
0603 break;
0604 default:
0605 __skb_queue_purge(&sk->sk_receive_queue);
0606 break;
0607 }
0608 }
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626 static int tipc_release(struct socket *sock)
0627 {
0628 struct sock *sk = sock->sk;
0629 struct tipc_sock *tsk;
0630
0631
0632
0633
0634
0635 if (sk == NULL)
0636 return 0;
0637
0638 tsk = tipc_sk(sk);
0639 lock_sock(sk);
0640
0641 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
0642 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
0643 sk->sk_shutdown = SHUTDOWN_MASK;
0644 tipc_sk_leave(tsk);
0645 tipc_sk_withdraw(tsk, NULL);
0646 __skb_queue_purge(&tsk->mc_method.deferredq);
0647 sk_stop_timer(sk, &sk->sk_timer);
0648 tipc_sk_remove(tsk);
0649
0650 sock_orphan(sk);
0651
0652 release_sock(sk);
0653 tipc_dest_list_purge(&tsk->cong_links);
0654 tsk->cong_link_cnt = 0;
0655 call_rcu(&tsk->rcu, tipc_sk_callback);
0656 sock->sk = NULL;
0657
0658 return 0;
0659 }
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676 static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
0677 {
0678 struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
0679 struct tipc_sock *tsk = tipc_sk(sock->sk);
0680 bool unbind = false;
0681
0682 if (unlikely(!alen))
0683 return tipc_sk_withdraw(tsk, NULL);
0684
0685 if (ua->addrtype == TIPC_SERVICE_ADDR) {
0686 ua->addrtype = TIPC_SERVICE_RANGE;
0687 ua->sr.upper = ua->sr.lower;
0688 }
0689 if (ua->scope < 0) {
0690 unbind = true;
0691 ua->scope = -ua->scope;
0692 }
0693
0694 if (ua->scope != TIPC_NODE_SCOPE)
0695 ua->scope = TIPC_CLUSTER_SCOPE;
0696
0697 if (tsk->group)
0698 return -EACCES;
0699
0700 if (unbind)
0701 return tipc_sk_withdraw(tsk, ua);
0702 return tipc_sk_publish(tsk, ua);
0703 }
0704
0705 int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
0706 {
0707 int res;
0708
0709 lock_sock(sock->sk);
0710 res = __tipc_bind(sock, skaddr, alen);
0711 release_sock(sock->sk);
0712 return res;
0713 }
0714
0715 static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
0716 {
0717 struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
0718 u32 atype = ua->addrtype;
0719
0720 if (alen) {
0721 if (!tipc_uaddr_valid(ua, alen))
0722 return -EINVAL;
0723 if (atype == TIPC_SOCKET_ADDR)
0724 return -EAFNOSUPPORT;
0725 if (ua->sr.type < TIPC_RESERVED_TYPES) {
0726 pr_warn_once("Can't bind to reserved service type %u\n",
0727 ua->sr.type);
0728 return -EACCES;
0729 }
0730 }
0731 return tipc_sk_bind(sock, skaddr, alen);
0732 }
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745
0746 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
0747 int peer)
0748 {
0749 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
0750 struct sock *sk = sock->sk;
0751 struct tipc_sock *tsk = tipc_sk(sk);
0752
0753 memset(addr, 0, sizeof(*addr));
0754 if (peer) {
0755 if ((!tipc_sk_connected(sk)) &&
0756 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
0757 return -ENOTCONN;
0758 addr->addr.id.ref = tsk_peer_port(tsk);
0759 addr->addr.id.node = tsk_peer_node(tsk);
0760 } else {
0761 addr->addr.id.ref = tsk->portid;
0762 addr->addr.id.node = tipc_own_addr(sock_net(sk));
0763 }
0764
0765 addr->addrtype = TIPC_SOCKET_ADDR;
0766 addr->family = AF_TIPC;
0767 addr->scope = 0;
0768 addr->addr.name.domain = 0;
0769
0770 return sizeof(*addr);
0771 }
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791 static __poll_t tipc_poll(struct file *file, struct socket *sock,
0792 poll_table *wait)
0793 {
0794 struct sock *sk = sock->sk;
0795 struct tipc_sock *tsk = tipc_sk(sk);
0796 __poll_t revents = 0;
0797
0798 sock_poll_wait(file, sock, wait);
0799 trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
0800
0801 if (sk->sk_shutdown & RCV_SHUTDOWN)
0802 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
0803 if (sk->sk_shutdown == SHUTDOWN_MASK)
0804 revents |= EPOLLHUP;
0805
0806 switch (sk->sk_state) {
0807 case TIPC_ESTABLISHED:
0808 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
0809 revents |= EPOLLOUT;
0810 fallthrough;
0811 case TIPC_LISTEN:
0812 case TIPC_CONNECTING:
0813 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
0814 revents |= EPOLLIN | EPOLLRDNORM;
0815 break;
0816 case TIPC_OPEN:
0817 if (tsk->group_is_open && !tsk->cong_link_cnt)
0818 revents |= EPOLLOUT;
0819 if (!tipc_sk_type_connectionless(sk))
0820 break;
0821 if (skb_queue_empty_lockless(&sk->sk_receive_queue))
0822 break;
0823 revents |= EPOLLIN | EPOLLRDNORM;
0824 break;
0825 case TIPC_DISCONNECTING:
0826 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
0827 break;
0828 }
0829 return revents;
0830 }
0831
0832
0833
0834
0835
0836
0837
0838
0839
0840
0841
0842
0843 static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
0844 struct msghdr *msg, size_t dlen, long timeout)
0845 {
0846 struct sock *sk = sock->sk;
0847 struct tipc_sock *tsk = tipc_sk(sk);
0848 struct tipc_msg *hdr = &tsk->phdr;
0849 struct net *net = sock_net(sk);
0850 int mtu = tipc_bcast_get_mtu(net);
0851 struct sk_buff_head pkts;
0852 struct tipc_nlist dsts;
0853 int rc;
0854
0855 if (tsk->group)
0856 return -EACCES;
0857
0858
0859 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
0860 if (unlikely(rc))
0861 return rc;
0862
0863
0864 tipc_nlist_init(&dsts, tipc_own_addr(net));
0865 tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
0866 if (!dsts.local && !dsts.remote)
0867 return -EHOSTUNREACH;
0868
0869
0870 msg_set_type(hdr, TIPC_MCAST_MSG);
0871 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
0872 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
0873 msg_set_destport(hdr, 0);
0874 msg_set_destnode(hdr, 0);
0875 msg_set_nametype(hdr, ua->sr.type);
0876 msg_set_namelower(hdr, ua->sr.lower);
0877 msg_set_nameupper(hdr, ua->sr.upper);
0878
0879
0880 __skb_queue_head_init(&pkts);
0881 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
0882
0883
0884 if (unlikely(rc == dlen)) {
0885 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
0886 TIPC_DUMP_SK_SNDQ, " ");
0887 rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
0888 &tsk->cong_link_cnt);
0889 }
0890
0891 tipc_nlist_purge(&dsts);
0892
0893 return rc ? rc : dlen;
0894 }
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
0907 struct msghdr *m, struct tipc_member *mb,
0908 u32 dnode, u32 dport, int dlen)
0909 {
0910 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
0911 struct tipc_mc_method *method = &tsk->mc_method;
0912 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
0913 struct tipc_msg *hdr = &tsk->phdr;
0914 struct sk_buff_head pkts;
0915 int mtu, rc;
0916
0917
0918 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
0919 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
0920 msg_set_destport(hdr, dport);
0921 msg_set_destnode(hdr, dnode);
0922 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
0923
0924
0925 __skb_queue_head_init(&pkts);
0926 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
0927 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
0928 if (unlikely(rc != dlen))
0929 return rc;
0930
0931
0932 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
0933 if (unlikely(rc == -ELINKCONG)) {
0934 tipc_dest_push(&tsk->cong_links, dnode, 0);
0935 tsk->cong_link_cnt++;
0936 }
0937
0938
0939 tipc_group_update_member(mb, blks);
0940
0941
0942 method->rcast = true;
0943 method->mandatory = true;
0944 return dlen;
0945 }
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
0958 int dlen, long timeout)
0959 {
0960 struct sock *sk = sock->sk;
0961 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
0962 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
0963 struct tipc_sock *tsk = tipc_sk(sk);
0964 struct net *net = sock_net(sk);
0965 struct tipc_member *mb = NULL;
0966 u32 node, port;
0967 int rc;
0968
0969 node = ua->sk.node;
0970 port = ua->sk.ref;
0971 if (!port && !node)
0972 return -EHOSTUNREACH;
0973
0974
0975 rc = tipc_wait_for_cond(sock, &timeout,
0976 !tipc_dest_find(&tsk->cong_links, node, 0) &&
0977 tsk->group &&
0978 !tipc_group_cong(tsk->group, node, port, blks,
0979 &mb));
0980 if (unlikely(rc))
0981 return rc;
0982
0983 if (unlikely(!mb))
0984 return -EHOSTUNREACH;
0985
0986 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
0987
0988 return rc ? rc : dlen;
0989 }
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
1002 int dlen, long timeout)
1003 {
1004 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1005 struct sock *sk = sock->sk;
1006 struct tipc_sock *tsk = tipc_sk(sk);
1007 struct list_head *cong_links = &tsk->cong_links;
1008 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
1009 struct tipc_msg *hdr = &tsk->phdr;
1010 struct tipc_member *first = NULL;
1011 struct tipc_member *mbr = NULL;
1012 struct net *net = sock_net(sk);
1013 u32 node, port, exclude;
1014 struct list_head dsts;
1015 int lookups = 0;
1016 int dstcnt, rc;
1017 bool cong;
1018
1019 INIT_LIST_HEAD(&dsts);
1020 ua->sa.type = msg_nametype(hdr);
1021 ua->scope = msg_lookup_scope(hdr);
1022
1023 while (++lookups < 4) {
1024 exclude = tipc_group_exclude(tsk->group);
1025
1026 first = NULL;
1027
1028
1029 while (1) {
1030 if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
1031 exclude, false))
1032 return -EHOSTUNREACH;
1033 tipc_dest_pop(&dsts, &node, &port);
1034 cong = tipc_group_cong(tsk->group, node, port, blks,
1035 &mbr);
1036 if (!cong)
1037 break;
1038 if (mbr == first)
1039 break;
1040 if (!first)
1041 first = mbr;
1042 }
1043
1044
1045 if (unlikely(!mbr))
1046 continue;
1047
1048 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
1049 break;
1050
1051
1052 rc = tipc_wait_for_cond(sock, &timeout,
1053 !tipc_dest_find(cong_links, node, 0) &&
1054 tsk->group &&
1055 !tipc_group_cong(tsk->group, node, port,
1056 blks, &mbr));
1057 if (unlikely(rc))
1058 return rc;
1059
1060
1061 if (likely(mbr))
1062 break;
1063 }
1064
1065 if (unlikely(lookups >= 4))
1066 return -EHOSTUNREACH;
1067
1068 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1069
1070 return rc ? rc : dlen;
1071 }
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1084 int dlen, long timeout)
1085 {
1086 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1087 struct sock *sk = sock->sk;
1088 struct net *net = sock_net(sk);
1089 struct tipc_sock *tsk = tipc_sk(sk);
1090 struct tipc_nlist *dsts;
1091 struct tipc_mc_method *method = &tsk->mc_method;
1092 bool ack = method->mandatory && method->rcast;
1093 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1094 struct tipc_msg *hdr = &tsk->phdr;
1095 int mtu = tipc_bcast_get_mtu(net);
1096 struct sk_buff_head pkts;
1097 int rc = -EHOSTUNREACH;
1098
1099
1100 rc = tipc_wait_for_cond(sock, &timeout,
1101 !tsk->cong_link_cnt && tsk->group &&
1102 !tipc_group_bc_cong(tsk->group, blks));
1103 if (unlikely(rc))
1104 return rc;
1105
1106 dsts = tipc_group_dests(tsk->group);
1107 if (!dsts->local && !dsts->remote)
1108 return -EHOSTUNREACH;
1109
1110
1111 if (ua) {
1112 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1113 msg_set_nameinst(hdr, ua->sa.instance);
1114 } else {
1115 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1116 msg_set_nameinst(hdr, 0);
1117 }
1118 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1119 msg_set_destport(hdr, 0);
1120 msg_set_destnode(hdr, 0);
1121 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1122
1123
1124 msg_set_grp_bc_ack_req(hdr, ack);
1125
1126
1127 __skb_queue_head_init(&pkts);
1128 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1129 if (unlikely(rc != dlen))
1130 return rc;
1131
1132
1133 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1134 if (unlikely(rc))
1135 return rc;
1136
1137
1138 tipc_group_update_bc_members(tsk->group, blks, ack);
1139
1140
1141 method->mandatory = false;
1142 method->expires = jiffies;
1143
1144 return dlen;
1145 }
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1158 int dlen, long timeout)
1159 {
1160 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1161 struct sock *sk = sock->sk;
1162 struct tipc_sock *tsk = tipc_sk(sk);
1163 struct tipc_group *grp = tsk->group;
1164 struct tipc_msg *hdr = &tsk->phdr;
1165 struct net *net = sock_net(sk);
1166 struct list_head dsts;
1167 u32 dstcnt, exclude;
1168
1169 INIT_LIST_HEAD(&dsts);
1170 ua->sa.type = msg_nametype(hdr);
1171 ua->scope = msg_lookup_scope(hdr);
1172 exclude = tipc_group_exclude(grp);
1173
1174 if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
1175 return -EHOSTUNREACH;
1176
1177 if (dstcnt == 1) {
1178 tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
1179 return tipc_send_group_unicast(sock, m, dlen, timeout);
1180 }
1181
1182 tipc_dest_list_purge(&dsts);
1183 return tipc_send_group_bcast(sock, m, dlen, timeout);
1184 }
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1195 struct sk_buff_head *inputq)
1196 {
1197 u32 self = tipc_own_addr(net);
1198 struct sk_buff *skb, *_skb;
1199 u32 portid, onode;
1200 struct sk_buff_head tmpq;
1201 struct list_head dports;
1202 struct tipc_msg *hdr;
1203 struct tipc_uaddr ua;
1204 int user, mtyp, hlen;
1205
1206 __skb_queue_head_init(&tmpq);
1207 INIT_LIST_HEAD(&dports);
1208 ua.addrtype = TIPC_SERVICE_RANGE;
1209
1210
1211 skb = tipc_skb_peek(arrvq, &inputq->lock);
1212 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1213 hdr = buf_msg(skb);
1214 user = msg_user(hdr);
1215 mtyp = msg_type(hdr);
1216 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1217 onode = msg_orignode(hdr);
1218 ua.sr.type = msg_nametype(hdr);
1219 ua.sr.lower = msg_namelower(hdr);
1220 ua.sr.upper = msg_nameupper(hdr);
1221 if (onode == self)
1222 ua.scope = TIPC_ANY_SCOPE;
1223 else
1224 ua.scope = TIPC_CLUSTER_SCOPE;
1225
1226 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1227 spin_lock_bh(&inputq->lock);
1228 if (skb_peek(arrvq) == skb) {
1229 __skb_dequeue(arrvq);
1230 __skb_queue_tail(inputq, skb);
1231 }
1232 kfree_skb(skb);
1233 spin_unlock_bh(&inputq->lock);
1234 continue;
1235 }
1236
1237
1238 if (msg_in_group(hdr)) {
1239 ua.sr.lower = 0;
1240 ua.sr.upper = ~0;
1241 ua.scope = msg_lookup_scope(hdr);
1242 }
1243
1244
1245 tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
1246
1247
1248 while (tipc_dest_pop(&dports, NULL, &portid)) {
1249 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1250 if (_skb) {
1251 msg_set_destport(buf_msg(_skb), portid);
1252 __skb_queue_tail(&tmpq, _skb);
1253 continue;
1254 }
1255 pr_warn("Failed to clone mcast rcv buffer\n");
1256 }
1257
1258 spin_lock_bh(&inputq->lock);
1259 if (skb_peek(arrvq) == skb) {
1260 skb_queue_splice_tail_init(&tmpq, inputq);
1261
1262 kfree_skb(__skb_dequeue(arrvq));
1263 }
1264 spin_unlock_bh(&inputq->lock);
1265 __skb_queue_purge(&tmpq);
1266 kfree_skb(skb);
1267 }
1268 tipc_sk_rcv(net, inputq);
1269 }
1270
1271
1272
1273
1274 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
1275 {
1276 struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
1277 struct sk_buff *skb = skb_peek_tail(txq);
1278 struct net *net = sock_net(&tsk->sk);
1279 u32 dnode = tsk_peer_node(tsk);
1280 int rc;
1281
1282 if (nagle_ack) {
1283 tsk->pkt_cnt += skb_queue_len(txq);
1284 if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1285 tsk->oneway = 0;
1286 if (tsk->nagle_start < NAGLE_START_MAX)
1287 tsk->nagle_start *= 2;
1288 tsk->expect_ack = false;
1289 pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1290 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1291 tsk->nagle_start);
1292 } else {
1293 tsk->nagle_start = NAGLE_START_INIT;
1294 if (skb) {
1295 msg_set_ack_required(buf_msg(skb));
1296 tsk->expect_ack = true;
1297 } else {
1298 tsk->expect_ack = false;
1299 }
1300 }
1301 tsk->msg_acc = 0;
1302 tsk->pkt_cnt = 0;
1303 }
1304
1305 if (!skb || tsk->cong_link_cnt)
1306 return;
1307
1308
1309 if (msg_is_syn(buf_msg(skb)))
1310 return;
1311
1312 if (tsk->msg_acc)
1313 tsk->pkt_cnt += skb_queue_len(txq);
1314 tsk->snt_unacked += tsk->snd_backlog;
1315 tsk->snd_backlog = 0;
1316 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1317 if (rc == -ELINKCONG)
1318 tsk->cong_link_cnt = 1;
1319 }
1320
1321
1322
1323
1324
1325
1326
1327
1328 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1329 struct sk_buff_head *inputq,
1330 struct sk_buff_head *xmitq)
1331 {
1332 struct tipc_msg *hdr = buf_msg(skb);
1333 u32 onode = tsk_own_node(tsk);
1334 struct sock *sk = &tsk->sk;
1335 int mtyp = msg_type(hdr);
1336 bool was_cong;
1337
1338
1339 if (!tsk_peer_msg(tsk, hdr)) {
1340 trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1341 goto exit;
1342 }
1343
1344 if (unlikely(msg_errcode(hdr))) {
1345 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1346 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1347 tsk_peer_port(tsk));
1348 sk->sk_state_change(sk);
1349
1350
1351
1352
1353 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1354 msg_set_type(hdr, TIPC_CONN_MSG);
1355 msg_set_size(hdr, BASIC_H_SIZE);
1356 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1357 __skb_queue_tail(inputq, skb);
1358 return;
1359 }
1360
1361 tsk->probe_unacked = false;
1362
1363 if (mtyp == CONN_PROBE) {
1364 msg_set_type(hdr, CONN_PROBE_REPLY);
1365 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1366 __skb_queue_tail(xmitq, skb);
1367 return;
1368 } else if (mtyp == CONN_ACK) {
1369 was_cong = tsk_conn_cong(tsk);
1370 tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
1371 tsk->snt_unacked -= msg_conn_ack(hdr);
1372 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1373 tsk->snd_win = msg_adv_win(hdr);
1374 if (was_cong && !tsk_conn_cong(tsk))
1375 sk->sk_write_space(sk);
1376 } else if (mtyp != CONN_PROBE_REPLY) {
1377 pr_warn("Received unknown CONN_PROTO msg\n");
1378 }
1379 exit:
1380 kfree_skb(skb);
1381 }
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396 static int tipc_sendmsg(struct socket *sock,
1397 struct msghdr *m, size_t dsz)
1398 {
1399 struct sock *sk = sock->sk;
1400 int ret;
1401
1402 lock_sock(sk);
1403 ret = __tipc_sendmsg(sock, m, dsz);
1404 release_sock(sk);
1405
1406 return ret;
1407 }
1408
1409 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1410 {
1411 struct sock *sk = sock->sk;
1412 struct net *net = sock_net(sk);
1413 struct tipc_sock *tsk = tipc_sk(sk);
1414 struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1415 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1416 struct list_head *clinks = &tsk->cong_links;
1417 bool syn = !tipc_sk_type_connectionless(sk);
1418 struct tipc_group *grp = tsk->group;
1419 struct tipc_msg *hdr = &tsk->phdr;
1420 struct tipc_socket_addr skaddr;
1421 struct sk_buff_head pkts;
1422 int atype, mtu, rc;
1423
1424 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1425 return -EMSGSIZE;
1426
1427 if (ua) {
1428 if (!tipc_uaddr_valid(ua, m->msg_namelen))
1429 return -EINVAL;
1430 atype = ua->addrtype;
1431 }
1432
1433
1434 if (grp) {
1435 if (!ua)
1436 return tipc_send_group_bcast(sock, m, dlen, timeout);
1437 if (atype == TIPC_SERVICE_ADDR)
1438 return tipc_send_group_anycast(sock, m, dlen, timeout);
1439 if (atype == TIPC_SOCKET_ADDR)
1440 return tipc_send_group_unicast(sock, m, dlen, timeout);
1441 if (atype == TIPC_SERVICE_RANGE)
1442 return tipc_send_group_mcast(sock, m, dlen, timeout);
1443 return -EINVAL;
1444 }
1445
1446 if (!ua) {
1447 ua = (struct tipc_uaddr *)&tsk->peer;
1448 if (!syn && ua->family != AF_TIPC)
1449 return -EDESTADDRREQ;
1450 atype = ua->addrtype;
1451 }
1452
1453 if (unlikely(syn)) {
1454 if (sk->sk_state == TIPC_LISTEN)
1455 return -EPIPE;
1456 if (sk->sk_state != TIPC_OPEN)
1457 return -EISCONN;
1458 if (tsk->published)
1459 return -EOPNOTSUPP;
1460 if (atype == TIPC_SERVICE_ADDR)
1461 tsk->conn_addrtype = atype;
1462 msg_set_syn(hdr, 1);
1463 }
1464
1465 memset(&skaddr, 0, sizeof(skaddr));
1466
1467
1468 if (atype == TIPC_SERVICE_RANGE) {
1469 return tipc_sendmcast(sock, ua, m, dlen, timeout);
1470 } else if (atype == TIPC_SERVICE_ADDR) {
1471 skaddr.node = ua->lookup_node;
1472 ua->scope = tipc_node2scope(skaddr.node);
1473 if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
1474 return -EHOSTUNREACH;
1475 } else if (atype == TIPC_SOCKET_ADDR) {
1476 skaddr = ua->sk;
1477 } else {
1478 return -EINVAL;
1479 }
1480
1481
1482 rc = tipc_wait_for_cond(sock, &timeout,
1483 !tipc_dest_find(clinks, skaddr.node, 0));
1484 if (unlikely(rc))
1485 return rc;
1486
1487
1488 msg_set_destnode(hdr, skaddr.node);
1489 msg_set_destport(hdr, skaddr.ref);
1490 if (atype == TIPC_SERVICE_ADDR) {
1491 msg_set_type(hdr, TIPC_NAMED_MSG);
1492 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1493 msg_set_nametype(hdr, ua->sa.type);
1494 msg_set_nameinst(hdr, ua->sa.instance);
1495 msg_set_lookup_scope(hdr, ua->scope);
1496 } else {
1497 msg_set_type(hdr, TIPC_DIRECT_MSG);
1498 msg_set_lookup_scope(hdr, 0);
1499 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1500 }
1501
1502
1503 __skb_queue_head_init(&pkts);
1504 mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
1505 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1506 if (unlikely(rc != dlen))
1507 return rc;
1508 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
1509 __skb_queue_purge(&pkts);
1510 return -ENOMEM;
1511 }
1512
1513
1514 trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1515 rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
1516 if (unlikely(rc == -ELINKCONG)) {
1517 tipc_dest_push(clinks, skaddr.node, 0);
1518 tsk->cong_link_cnt++;
1519 rc = 0;
1520 }
1521
1522 if (unlikely(syn && !rc)) {
1523 tipc_set_sk_state(sk, TIPC_CONNECTING);
1524 if (dlen && timeout) {
1525 timeout = msecs_to_jiffies(timeout);
1526 tipc_wait_for_connect(sock, &timeout);
1527 }
1528 }
1529
1530 return rc ? rc : dlen;
1531 }
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1545 {
1546 struct sock *sk = sock->sk;
1547 int ret;
1548
1549 lock_sock(sk);
1550 ret = __tipc_sendstream(sock, m, dsz);
1551 release_sock(sk);
1552
1553 return ret;
1554 }
1555
1556 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1557 {
1558 struct sock *sk = sock->sk;
1559 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1560 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1561 struct sk_buff_head *txq = &sk->sk_write_queue;
1562 struct tipc_sock *tsk = tipc_sk(sk);
1563 struct tipc_msg *hdr = &tsk->phdr;
1564 struct net *net = sock_net(sk);
1565 struct sk_buff *skb;
1566 u32 dnode = tsk_peer_node(tsk);
1567 int maxnagle = tsk->maxnagle;
1568 int maxpkt = tsk->max_pkt;
1569 int send, sent = 0;
1570 int blocks, rc = 0;
1571
1572 if (unlikely(dlen > INT_MAX))
1573 return -EMSGSIZE;
1574
1575
1576 if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
1577 rc = __tipc_sendmsg(sock, m, dlen);
1578 if (dlen && dlen == rc) {
1579 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1580 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1581 }
1582 return rc;
1583 }
1584
1585 do {
1586 rc = tipc_wait_for_cond(sock, &timeout,
1587 (!tsk->cong_link_cnt &&
1588 !tsk_conn_cong(tsk) &&
1589 tipc_sk_connected(sk)));
1590 if (unlikely(rc))
1591 break;
1592 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1593 blocks = tsk->snd_backlog;
1594 if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1595 send <= maxnagle) {
1596 rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
1597 if (unlikely(rc < 0))
1598 break;
1599 blocks += rc;
1600 tsk->msg_acc++;
1601 if (blocks <= 64 && tsk->expect_ack) {
1602 tsk->snd_backlog = blocks;
1603 sent += send;
1604 break;
1605 } else if (blocks > 64) {
1606 tsk->pkt_cnt += skb_queue_len(txq);
1607 } else {
1608 skb = skb_peek_tail(txq);
1609 if (skb) {
1610 msg_set_ack_required(buf_msg(skb));
1611 tsk->expect_ack = true;
1612 } else {
1613 tsk->expect_ack = false;
1614 }
1615 tsk->msg_acc = 0;
1616 tsk->pkt_cnt = 0;
1617 }
1618 } else {
1619 rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
1620 if (unlikely(rc != send))
1621 break;
1622 blocks += tsk_inc(tsk, send + MIN_H_SIZE);
1623 }
1624 trace_tipc_sk_sendstream(sk, skb_peek(txq),
1625 TIPC_DUMP_SK_SNDQ, " ");
1626 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1627 if (unlikely(rc == -ELINKCONG)) {
1628 tsk->cong_link_cnt = 1;
1629 rc = 0;
1630 }
1631 if (likely(!rc)) {
1632 tsk->snt_unacked += blocks;
1633 tsk->snd_backlog = 0;
1634 sent += send;
1635 }
1636 } while (sent < dlen && !rc);
1637
1638 return sent ? sent : rc;
1639 }
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1652 {
1653 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1654 return -EMSGSIZE;
1655
1656 return tipc_sendstream(sock, m, dsz);
1657 }
1658
1659
1660
1661 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1662 u32 peer_node)
1663 {
1664 struct sock *sk = &tsk->sk;
1665 struct net *net = sock_net(sk);
1666 struct tipc_msg *msg = &tsk->phdr;
1667
1668 msg_set_syn(msg, 0);
1669 msg_set_destnode(msg, peer_node);
1670 msg_set_destport(msg, peer_port);
1671 msg_set_type(msg, TIPC_CONN_MSG);
1672 msg_set_lookup_scope(msg, 0);
1673 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1674
1675 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1676 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1677 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1678 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1679 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1680 tsk_set_nagle(tsk);
1681 __skb_queue_purge(&sk->sk_write_queue);
1682 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1683 return;
1684
1685
1686 tsk->rcv_win = FLOWCTL_MSG_WIN;
1687 tsk->snd_win = FLOWCTL_MSG_WIN;
1688 }
1689
1690
1691
1692
1693
1694
1695
1696
1697 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1698 {
1699 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1700 struct tipc_msg *hdr = buf_msg(skb);
1701
1702 if (!srcaddr)
1703 return;
1704
1705 srcaddr->sock.family = AF_TIPC;
1706 srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
1707 srcaddr->sock.scope = 0;
1708 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1709 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1710 srcaddr->sock.addr.name.domain = 0;
1711 m->msg_namelen = sizeof(struct sockaddr_tipc);
1712
1713 if (!msg_in_group(hdr))
1714 return;
1715
1716
1717 srcaddr->member.family = AF_TIPC;
1718 srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
1719 srcaddr->member.scope = 0;
1720 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1721 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1722 srcaddr->member.addr.name.domain = 0;
1723 m->msg_namelen = sizeof(*srcaddr);
1724 }
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1737 struct tipc_sock *tsk)
1738 {
1739 struct tipc_msg *hdr;
1740 u32 data[3] = {0,};
1741 bool has_addr;
1742 int dlen, rc;
1743
1744 if (likely(m->msg_controllen == 0))
1745 return 0;
1746
1747 hdr = buf_msg(skb);
1748 dlen = msg_data_sz(hdr);
1749
1750
1751 if (msg_errcode(hdr)) {
1752 if (skb_linearize(skb))
1753 return -ENOMEM;
1754 hdr = buf_msg(skb);
1755 data[0] = msg_errcode(hdr);
1756 data[1] = dlen;
1757 rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
1758 if (rc || !dlen)
1759 return rc;
1760 rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
1761 if (rc)
1762 return rc;
1763 }
1764
1765
1766 switch (msg_type(hdr)) {
1767 case TIPC_NAMED_MSG:
1768 has_addr = true;
1769 data[0] = msg_nametype(hdr);
1770 data[1] = msg_namelower(hdr);
1771 data[2] = data[1];
1772 break;
1773 case TIPC_MCAST_MSG:
1774 has_addr = true;
1775 data[0] = msg_nametype(hdr);
1776 data[1] = msg_namelower(hdr);
1777 data[2] = msg_nameupper(hdr);
1778 break;
1779 case TIPC_CONN_MSG:
1780 has_addr = !!tsk->conn_addrtype;
1781 data[0] = msg_nametype(&tsk->phdr);
1782 data[1] = msg_nameinst(&tsk->phdr);
1783 data[2] = data[1];
1784 break;
1785 default:
1786 has_addr = false;
1787 }
1788 if (!has_addr)
1789 return 0;
1790 return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
1791 }
1792
1793 static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
1794 {
1795 struct sock *sk = &tsk->sk;
1796 struct sk_buff *skb = NULL;
1797 struct tipc_msg *msg;
1798 u32 peer_port = tsk_peer_port(tsk);
1799 u32 dnode = tsk_peer_node(tsk);
1800
1801 if (!tipc_sk_connected(sk))
1802 return NULL;
1803 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1804 dnode, tsk_own_node(tsk), peer_port,
1805 tsk->portid, TIPC_OK);
1806 if (!skb)
1807 return NULL;
1808 msg = buf_msg(skb);
1809 msg_set_conn_ack(msg, tsk->rcv_unacked);
1810 tsk->rcv_unacked = 0;
1811
1812
1813 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1814 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1815 msg_set_adv_win(msg, tsk->rcv_win);
1816 }
1817 return skb;
1818 }
1819
1820 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1821 {
1822 struct sk_buff *skb;
1823
1824 skb = tipc_sk_build_ack(tsk);
1825 if (!skb)
1826 return;
1827
1828 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1829 msg_link_selector(buf_msg(skb)));
1830 }
1831
1832 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1833 {
1834 struct sock *sk = sock->sk;
1835 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1836 long timeo = *timeop;
1837 int err = sock_error(sk);
1838
1839 if (err)
1840 return err;
1841
1842 for (;;) {
1843 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1844 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1845 err = -ENOTCONN;
1846 break;
1847 }
1848 add_wait_queue(sk_sleep(sk), &wait);
1849 release_sock(sk);
1850 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1851 sched_annotate_sleep();
1852 lock_sock(sk);
1853 remove_wait_queue(sk_sleep(sk), &wait);
1854 }
1855 err = 0;
1856 if (!skb_queue_empty(&sk->sk_receive_queue))
1857 break;
1858 err = -EAGAIN;
1859 if (!timeo)
1860 break;
1861 err = sock_intr_errno(timeo);
1862 if (signal_pending(current))
1863 break;
1864
1865 err = sock_error(sk);
1866 if (err)
1867 break;
1868 }
1869 *timeop = timeo;
1870 return err;
1871 }
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1886 size_t buflen, int flags)
1887 {
1888 struct sock *sk = sock->sk;
1889 bool connected = !tipc_sk_type_connectionless(sk);
1890 struct tipc_sock *tsk = tipc_sk(sk);
1891 int rc, err, hlen, dlen, copy;
1892 struct tipc_skb_cb *skb_cb;
1893 struct sk_buff_head xmitq;
1894 struct tipc_msg *hdr;
1895 struct sk_buff *skb;
1896 bool grp_evt;
1897 long timeout;
1898
1899
1900 if (unlikely(!buflen))
1901 return -EINVAL;
1902
1903 lock_sock(sk);
1904 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1905 rc = -ENOTCONN;
1906 goto exit;
1907 }
1908 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1909
1910
1911 do {
1912 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1913 if (unlikely(rc))
1914 goto exit;
1915 skb = skb_peek(&sk->sk_receive_queue);
1916 skb_cb = TIPC_SKB_CB(skb);
1917 hdr = buf_msg(skb);
1918 dlen = msg_data_sz(hdr);
1919 hlen = msg_hdr_sz(hdr);
1920 err = msg_errcode(hdr);
1921 grp_evt = msg_is_grp_evt(hdr);
1922 if (likely(dlen || err))
1923 break;
1924 tsk_advance_rx_queue(sk);
1925 } while (1);
1926
1927
1928 tipc_sk_set_orig_addr(m, skb);
1929 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1930 if (unlikely(rc))
1931 goto exit;
1932 hdr = buf_msg(skb);
1933
1934
1935 if (likely(!err)) {
1936 int offset = skb_cb->bytes_read;
1937
1938 copy = min_t(int, dlen - offset, buflen);
1939 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1940 if (unlikely(rc))
1941 goto exit;
1942 if (unlikely(offset + copy < dlen)) {
1943 if (flags & MSG_EOR) {
1944 if (!(flags & MSG_PEEK))
1945 skb_cb->bytes_read = offset + copy;
1946 } else {
1947 m->msg_flags |= MSG_TRUNC;
1948 skb_cb->bytes_read = 0;
1949 }
1950 } else {
1951 if (flags & MSG_EOR)
1952 m->msg_flags |= MSG_EOR;
1953 skb_cb->bytes_read = 0;
1954 }
1955 } else {
1956 copy = 0;
1957 rc = 0;
1958 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1959 rc = -ECONNRESET;
1960 goto exit;
1961 }
1962 }
1963
1964
1965 if (unlikely(grp_evt)) {
1966 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1967 m->msg_flags |= MSG_EOR;
1968 m->msg_flags |= MSG_OOB;
1969 copy = 0;
1970 }
1971
1972
1973 if (unlikely(flags & MSG_PEEK))
1974 goto exit;
1975
1976
1977 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1978 __skb_queue_head_init(&xmitq);
1979 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1980 msg_orignode(hdr), msg_origport(hdr),
1981 &xmitq);
1982 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1983 }
1984
1985 if (skb_cb->bytes_read)
1986 goto exit;
1987
1988 tsk_advance_rx_queue(sk);
1989
1990 if (likely(!connected))
1991 goto exit;
1992
1993
1994 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1995 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1996 tipc_sk_send_ack(tsk);
1997 exit:
1998 release_sock(sk);
1999 return rc ? rc : copy;
2000 }
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
2015 size_t buflen, int flags)
2016 {
2017 struct sock *sk = sock->sk;
2018 struct tipc_sock *tsk = tipc_sk(sk);
2019 struct sk_buff *skb;
2020 struct tipc_msg *hdr;
2021 struct tipc_skb_cb *skb_cb;
2022 bool peek = flags & MSG_PEEK;
2023 int offset, required, copy, copied = 0;
2024 int hlen, dlen, err, rc;
2025 long timeout;
2026
2027
2028 if (unlikely(!buflen))
2029 return -EINVAL;
2030
2031 lock_sock(sk);
2032
2033 if (unlikely(sk->sk_state == TIPC_OPEN)) {
2034 rc = -ENOTCONN;
2035 goto exit;
2036 }
2037 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
2038 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2039
2040 do {
2041
2042 rc = tipc_wait_for_rcvmsg(sock, &timeout);
2043 if (unlikely(rc))
2044 break;
2045 skb = skb_peek(&sk->sk_receive_queue);
2046 skb_cb = TIPC_SKB_CB(skb);
2047 hdr = buf_msg(skb);
2048 dlen = msg_data_sz(hdr);
2049 hlen = msg_hdr_sz(hdr);
2050 err = msg_errcode(hdr);
2051
2052
2053 if (unlikely(!dlen && !err)) {
2054 tsk_advance_rx_queue(sk);
2055 continue;
2056 }
2057
2058
2059 if (!copied) {
2060 tipc_sk_set_orig_addr(m, skb);
2061 rc = tipc_sk_anc_data_recv(m, skb, tsk);
2062 if (rc)
2063 break;
2064 hdr = buf_msg(skb);
2065 }
2066
2067
2068 if (likely(!err)) {
2069 offset = skb_cb->bytes_read;
2070 copy = min_t(int, dlen - offset, buflen - copied);
2071 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
2072 if (unlikely(rc))
2073 break;
2074 copied += copy;
2075 offset += copy;
2076 if (unlikely(offset < dlen)) {
2077 if (!peek)
2078 skb_cb->bytes_read = offset;
2079 break;
2080 }
2081 } else {
2082 rc = 0;
2083 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
2084 rc = -ECONNRESET;
2085 if (copied || rc)
2086 break;
2087 }
2088
2089 if (unlikely(peek))
2090 break;
2091
2092 tsk_advance_rx_queue(sk);
2093
2094
2095 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2096 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2097 tipc_sk_send_ack(tsk);
2098
2099
2100 if (copied == buflen || err)
2101 break;
2102
2103 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
2104 exit:
2105 release_sock(sk);
2106 return copied ? copied : rc;
2107 }
2108
2109
2110
2111
2112
2113 static void tipc_write_space(struct sock *sk)
2114 {
2115 struct socket_wq *wq;
2116
2117 rcu_read_lock();
2118 wq = rcu_dereference(sk->sk_wq);
2119 if (skwq_has_sleeper(wq))
2120 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2121 EPOLLWRNORM | EPOLLWRBAND);
2122 rcu_read_unlock();
2123 }
2124
2125
2126
2127
2128
2129 static void tipc_data_ready(struct sock *sk)
2130 {
2131 struct socket_wq *wq;
2132
2133 rcu_read_lock();
2134 wq = rcu_dereference(sk->sk_wq);
2135 if (skwq_has_sleeper(wq))
2136 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2137 EPOLLRDNORM | EPOLLRDBAND);
2138 rcu_read_unlock();
2139 }
2140
2141 static void tipc_sock_destruct(struct sock *sk)
2142 {
2143 __skb_queue_purge(&sk->sk_receive_queue);
2144 }
2145
2146 static void tipc_sk_proto_rcv(struct sock *sk,
2147 struct sk_buff_head *inputq,
2148 struct sk_buff_head *xmitq)
2149 {
2150 struct sk_buff *skb = __skb_dequeue(inputq);
2151 struct tipc_sock *tsk = tipc_sk(sk);
2152 struct tipc_msg *hdr = buf_msg(skb);
2153 struct tipc_group *grp = tsk->group;
2154 bool wakeup = false;
2155
2156 switch (msg_user(hdr)) {
2157 case CONN_MANAGER:
2158 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2159 return;
2160 case SOCK_WAKEUP:
2161 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2162
2163 smp_wmb();
2164 tsk->cong_link_cnt--;
2165 wakeup = true;
2166 tipc_sk_push_backlog(tsk, false);
2167 break;
2168 case GROUP_PROTOCOL:
2169 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
2170 break;
2171 case TOP_SRV:
2172 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2173 hdr, inputq, xmitq);
2174 break;
2175 default:
2176 break;
2177 }
2178
2179 if (wakeup)
2180 sk->sk_write_space(sk);
2181
2182 kfree_skb(skb);
2183 }
2184
2185
2186
2187
2188
2189
2190
2191
2192 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2193 struct sk_buff_head *xmitq)
2194 {
2195 struct sock *sk = &tsk->sk;
2196 struct net *net = sock_net(sk);
2197 struct tipc_msg *hdr = buf_msg(skb);
2198 bool con_msg = msg_connected(hdr);
2199 u32 pport = tsk_peer_port(tsk);
2200 u32 pnode = tsk_peer_node(tsk);
2201 u32 oport = msg_origport(hdr);
2202 u32 onode = msg_orignode(hdr);
2203 int err = msg_errcode(hdr);
2204 unsigned long delay;
2205
2206 if (unlikely(msg_mcast(hdr)))
2207 return false;
2208 tsk->oneway = 0;
2209
2210 switch (sk->sk_state) {
2211 case TIPC_CONNECTING:
2212
2213 if (likely(con_msg)) {
2214 if (err)
2215 break;
2216 tipc_sk_finish_conn(tsk, oport, onode);
2217 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2218
2219 if (msg_data_sz(hdr))
2220 return true;
2221
2222 sk->sk_state_change(sk);
2223 msg_set_dest_droppable(hdr, 1);
2224 return false;
2225 }
2226
2227 if (oport != pport || onode != pnode)
2228 return false;
2229
2230
2231 if (err != TIPC_ERR_OVERLOAD)
2232 break;
2233
2234
2235 if (skb_queue_empty(&sk->sk_write_queue))
2236 break;
2237 get_random_bytes(&delay, 2);
2238 delay %= (tsk->conn_timeout / 4);
2239 delay = msecs_to_jiffies(delay + 100);
2240 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2241 return false;
2242 case TIPC_OPEN:
2243 case TIPC_DISCONNECTING:
2244 return false;
2245 case TIPC_LISTEN:
2246
2247 if (!msg_is_syn(hdr) &&
2248 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2249 return false;
2250 if (!con_msg && !err)
2251 return true;
2252 return false;
2253 case TIPC_ESTABLISHED:
2254 if (!skb_queue_empty(&sk->sk_write_queue))
2255 tipc_sk_push_backlog(tsk, false);
2256
2257 if (likely(con_msg && !err && pport == oport &&
2258 pnode == onode)) {
2259 if (msg_ack_required(hdr)) {
2260 struct sk_buff *skb;
2261
2262 skb = tipc_sk_build_ack(tsk);
2263 if (skb) {
2264 msg_set_nagle_ack(buf_msg(skb));
2265 __skb_queue_tail(xmitq, skb);
2266 }
2267 }
2268 return true;
2269 }
2270 if (!tsk_peer_msg(tsk, hdr))
2271 return false;
2272 if (!err)
2273 return true;
2274 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2275 tipc_node_remove_conn(net, pnode, tsk->portid);
2276 sk->sk_state_change(sk);
2277 return true;
2278 default:
2279 pr_err("Unknown sk_state %u\n", sk->sk_state);
2280 }
2281
2282 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2283 sk->sk_err = ECONNREFUSED;
2284 sk->sk_state_change(sk);
2285 return true;
2286 }
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2307 {
2308 struct tipc_sock *tsk = tipc_sk(sk);
2309 struct tipc_msg *hdr = buf_msg(skb);
2310
2311 if (unlikely(msg_in_group(hdr)))
2312 return READ_ONCE(sk->sk_rcvbuf);
2313
2314 if (unlikely(!msg_connected(hdr)))
2315 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2316
2317 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2318 return READ_ONCE(sk->sk_rcvbuf);
2319
2320 return FLOWCTL_MSG_LIM;
2321 }
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2335 struct sk_buff_head *xmitq)
2336 {
2337 bool sk_conn = !tipc_sk_type_connectionless(sk);
2338 struct tipc_sock *tsk = tipc_sk(sk);
2339 struct tipc_group *grp = tsk->group;
2340 struct tipc_msg *hdr = buf_msg(skb);
2341 struct net *net = sock_net(sk);
2342 struct sk_buff_head inputq;
2343 int mtyp = msg_type(hdr);
2344 int limit, err = TIPC_OK;
2345
2346 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2347 TIPC_SKB_CB(skb)->bytes_read = 0;
2348 __skb_queue_head_init(&inputq);
2349 __skb_queue_tail(&inputq, skb);
2350
2351 if (unlikely(!msg_isdata(hdr)))
2352 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2353
2354 if (unlikely(grp))
2355 tipc_group_filter_msg(grp, &inputq, xmitq);
2356
2357 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2358 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2359
2360
2361 while ((skb = __skb_dequeue(&inputq))) {
2362 hdr = buf_msg(skb);
2363 limit = rcvbuf_limit(sk, skb);
2364 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2365 (!sk_conn && msg_connected(hdr)) ||
2366 (!grp && msg_in_group(hdr)))
2367 err = TIPC_ERR_NO_PORT;
2368 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2369 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2370 "err_overload2!");
2371 atomic_inc(&sk->sk_drops);
2372 err = TIPC_ERR_OVERLOAD;
2373 }
2374
2375 if (unlikely(err)) {
2376 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2377 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2378 "@filter_rcv!");
2379 __skb_queue_tail(xmitq, skb);
2380 }
2381 err = TIPC_OK;
2382 continue;
2383 }
2384 __skb_queue_tail(&sk->sk_receive_queue, skb);
2385 skb_set_owner_r(skb, sk);
2386 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2387 "rcvq >90% allocated!");
2388 sk->sk_data_ready(sk);
2389 }
2390 }
2391
2392
2393
2394
2395
2396
2397
2398
2399 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2400 {
2401 unsigned int before = sk_rmem_alloc_get(sk);
2402 struct sk_buff_head xmitq;
2403 unsigned int added;
2404
2405 __skb_queue_head_init(&xmitq);
2406
2407 tipc_sk_filter_rcv(sk, skb, &xmitq);
2408 added = sk_rmem_alloc_get(sk) - before;
2409 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2410
2411
2412 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2413 return 0;
2414 }
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2427 u32 dport, struct sk_buff_head *xmitq)
2428 {
2429 unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
2430 struct sk_buff *skb;
2431 unsigned int lim;
2432 atomic_t *dcnt;
2433 u32 onode;
2434
2435 while (skb_queue_len(inputq)) {
2436 if (unlikely(time_after_eq(jiffies, time_limit)))
2437 return;
2438
2439 skb = tipc_skb_dequeue(inputq, dport);
2440 if (unlikely(!skb))
2441 return;
2442
2443
2444 if (!sock_owned_by_user(sk)) {
2445 tipc_sk_filter_rcv(sk, skb, xmitq);
2446 continue;
2447 }
2448
2449
2450 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2451 if (!sk->sk_backlog.len)
2452 atomic_set(dcnt, 0);
2453 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2454 if (likely(!sk_add_backlog(sk, skb, lim))) {
2455 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2456 "bklg & rcvq >90% allocated!");
2457 continue;
2458 }
2459
2460 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2461
2462 onode = tipc_own_addr(sock_net(sk));
2463 atomic_inc(&sk->sk_drops);
2464 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2465 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2466 "@sk_enqueue!");
2467 __skb_queue_tail(xmitq, skb);
2468 }
2469 break;
2470 }
2471 }
2472
2473
2474
2475
2476
2477
2478
2479
2480 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2481 {
2482 struct sk_buff_head xmitq;
2483 u32 dnode, dport = 0;
2484 int err;
2485 struct tipc_sock *tsk;
2486 struct sock *sk;
2487 struct sk_buff *skb;
2488
2489 __skb_queue_head_init(&xmitq);
2490 while (skb_queue_len(inputq)) {
2491 dport = tipc_skb_peek_port(inputq, dport);
2492 tsk = tipc_sk_lookup(net, dport);
2493
2494 if (likely(tsk)) {
2495 sk = &tsk->sk;
2496 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2497 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2498 spin_unlock_bh(&sk->sk_lock.slock);
2499 }
2500
2501 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2502 sock_put(sk);
2503 continue;
2504 }
2505
2506 skb = tipc_skb_dequeue(inputq, dport);
2507 if (!skb)
2508 return;
2509
2510
2511 err = TIPC_ERR_NO_PORT;
2512 if (tipc_msg_lookup_dest(net, skb, &err))
2513 goto xmit;
2514
2515
2516 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2517 continue;
2518
2519 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2520 xmit:
2521 dnode = msg_destnode(buf_msg(skb));
2522 tipc_node_xmit_skb(net, skb, dnode, dport);
2523 }
2524 }
2525
2526 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2527 {
2528 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2529 struct sock *sk = sock->sk;
2530 int done;
2531
2532 do {
2533 int err = sock_error(sk);
2534 if (err)
2535 return err;
2536 if (!*timeo_p)
2537 return -ETIMEDOUT;
2538 if (signal_pending(current))
2539 return sock_intr_errno(*timeo_p);
2540 if (sk->sk_state == TIPC_DISCONNECTING)
2541 break;
2542
2543 add_wait_queue(sk_sleep(sk), &wait);
2544 done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
2545 &wait);
2546 remove_wait_queue(sk_sleep(sk), &wait);
2547 } while (!done);
2548 return 0;
2549 }
2550
2551 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2552 {
2553 if (addr->family != AF_TIPC)
2554 return false;
2555 if (addr->addrtype == TIPC_SERVICE_RANGE)
2556 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2557 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2558 addr->addrtype == TIPC_SOCKET_ADDR);
2559 }
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2571 int destlen, int flags)
2572 {
2573 struct sock *sk = sock->sk;
2574 struct tipc_sock *tsk = tipc_sk(sk);
2575 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2576 struct msghdr m = {NULL,};
2577 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2578 int previous;
2579 int res = 0;
2580
2581 if (destlen != sizeof(struct sockaddr_tipc))
2582 return -EINVAL;
2583
2584 lock_sock(sk);
2585
2586 if (tsk->group) {
2587 res = -EINVAL;
2588 goto exit;
2589 }
2590
2591 if (dst->family == AF_UNSPEC) {
2592 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2593 if (!tipc_sk_type_connectionless(sk))
2594 res = -EINVAL;
2595 goto exit;
2596 }
2597 if (!tipc_sockaddr_is_sane(dst)) {
2598 res = -EINVAL;
2599 goto exit;
2600 }
2601
2602 if (tipc_sk_type_connectionless(sk)) {
2603 memcpy(&tsk->peer, dest, destlen);
2604 goto exit;
2605 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2606 res = -EINVAL;
2607 goto exit;
2608 }
2609
2610 previous = sk->sk_state;
2611
2612 switch (sk->sk_state) {
2613 case TIPC_OPEN:
2614
2615 m.msg_name = dest;
2616 m.msg_namelen = destlen;
2617
2618
2619
2620
2621 if (!timeout)
2622 m.msg_flags = MSG_DONTWAIT;
2623
2624 res = __tipc_sendmsg(sock, &m, 0);
2625 if ((res < 0) && (res != -EWOULDBLOCK))
2626 goto exit;
2627
2628
2629
2630
2631
2632 res = -EINPROGRESS;
2633 fallthrough;
2634 case TIPC_CONNECTING:
2635 if (!timeout) {
2636 if (previous == TIPC_CONNECTING)
2637 res = -EALREADY;
2638 goto exit;
2639 }
2640 timeout = msecs_to_jiffies(timeout);
2641
2642 res = tipc_wait_for_connect(sock, &timeout);
2643 break;
2644 case TIPC_ESTABLISHED:
2645 res = -EISCONN;
2646 break;
2647 default:
2648 res = -EINVAL;
2649 }
2650
2651 exit:
2652 release_sock(sk);
2653 return res;
2654 }
2655
2656
2657
2658
2659
2660
2661
2662
2663 static int tipc_listen(struct socket *sock, int len)
2664 {
2665 struct sock *sk = sock->sk;
2666 int res;
2667
2668 lock_sock(sk);
2669 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2670 release_sock(sk);
2671
2672 return res;
2673 }
2674
2675 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2676 {
2677 struct sock *sk = sock->sk;
2678 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2679 int err;
2680
2681
2682
2683
2684
2685
2686 for (;;) {
2687 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2688 add_wait_queue(sk_sleep(sk), &wait);
2689 release_sock(sk);
2690 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
2691 lock_sock(sk);
2692 remove_wait_queue(sk_sleep(sk), &wait);
2693 }
2694 err = 0;
2695 if (!skb_queue_empty(&sk->sk_receive_queue))
2696 break;
2697 err = -EAGAIN;
2698 if (!timeo)
2699 break;
2700 err = sock_intr_errno(timeo);
2701 if (signal_pending(current))
2702 break;
2703 }
2704 return err;
2705 }
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2717 bool kern)
2718 {
2719 struct sock *new_sk, *sk = sock->sk;
2720 struct tipc_sock *new_tsock;
2721 struct msghdr m = {NULL,};
2722 struct tipc_msg *msg;
2723 struct sk_buff *buf;
2724 long timeo;
2725 int res;
2726
2727 lock_sock(sk);
2728
2729 if (sk->sk_state != TIPC_LISTEN) {
2730 res = -EINVAL;
2731 goto exit;
2732 }
2733 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2734 res = tipc_wait_for_accept(sock, timeo);
2735 if (res)
2736 goto exit;
2737
2738 buf = skb_peek(&sk->sk_receive_queue);
2739
2740 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2741 if (res)
2742 goto exit;
2743 security_sk_clone(sock->sk, new_sock->sk);
2744
2745 new_sk = new_sock->sk;
2746 new_tsock = tipc_sk(new_sk);
2747 msg = buf_msg(buf);
2748
2749
2750 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2751
2752
2753
2754
2755
2756 tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
2757
2758
2759 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2760
2761 tsk_set_importance(new_sk, msg_importance(msg));
2762 if (msg_named(msg)) {
2763 new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
2764 msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
2765 msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
2766 }
2767
2768
2769
2770
2771
2772 if (!msg_data_sz(msg)) {
2773 tsk_advance_rx_queue(sk);
2774 } else {
2775 __skb_dequeue(&sk->sk_receive_queue);
2776 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2777 skb_set_owner_r(buf, new_sk);
2778 }
2779 __tipc_sendstream(new_sock, &m, 0);
2780 release_sock(new_sk);
2781 exit:
2782 release_sock(sk);
2783 return res;
2784 }
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795 static int tipc_shutdown(struct socket *sock, int how)
2796 {
2797 struct sock *sk = sock->sk;
2798 int res;
2799
2800 if (how != SHUT_RDWR)
2801 return -EINVAL;
2802
2803 lock_sock(sk);
2804
2805 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2806 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2807 sk->sk_shutdown = SHUTDOWN_MASK;
2808
2809 if (sk->sk_state == TIPC_DISCONNECTING) {
2810
2811 __skb_queue_purge(&sk->sk_receive_queue);
2812
2813 res = 0;
2814 } else {
2815 res = -ENOTCONN;
2816 }
2817
2818 sk->sk_state_change(sk);
2819
2820 release_sock(sk);
2821 return res;
2822 }
2823
2824 static void tipc_sk_check_probing_state(struct sock *sk,
2825 struct sk_buff_head *list)
2826 {
2827 struct tipc_sock *tsk = tipc_sk(sk);
2828 u32 pnode = tsk_peer_node(tsk);
2829 u32 pport = tsk_peer_port(tsk);
2830 u32 self = tsk_own_node(tsk);
2831 u32 oport = tsk->portid;
2832 struct sk_buff *skb;
2833
2834 if (tsk->probe_unacked) {
2835 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2836 sk->sk_err = ECONNABORTED;
2837 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2838 sk->sk_state_change(sk);
2839 return;
2840 }
2841
2842 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2843 pnode, self, pport, oport, TIPC_OK);
2844 if (skb)
2845 __skb_queue_tail(list, skb);
2846 tsk->probe_unacked = true;
2847 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2848 }
2849
2850 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2851 {
2852 struct tipc_sock *tsk = tipc_sk(sk);
2853
2854
2855 if (tsk->cong_link_cnt) {
2856 sk_reset_timer(sk, &sk->sk_timer,
2857 jiffies + msecs_to_jiffies(100));
2858 return;
2859 }
2860
2861 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2862 }
2863
2864 static void tipc_sk_timeout(struct timer_list *t)
2865 {
2866 struct sock *sk = from_timer(sk, t, sk_timer);
2867 struct tipc_sock *tsk = tipc_sk(sk);
2868 u32 pnode = tsk_peer_node(tsk);
2869 struct sk_buff_head list;
2870 int rc = 0;
2871
2872 __skb_queue_head_init(&list);
2873 bh_lock_sock(sk);
2874
2875
2876 if (sock_owned_by_user(sk)) {
2877 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2878 bh_unlock_sock(sk);
2879 sock_put(sk);
2880 return;
2881 }
2882
2883 if (sk->sk_state == TIPC_ESTABLISHED)
2884 tipc_sk_check_probing_state(sk, &list);
2885 else if (sk->sk_state == TIPC_CONNECTING)
2886 tipc_sk_retry_connect(sk, &list);
2887
2888 bh_unlock_sock(sk);
2889
2890 if (!skb_queue_empty(&list))
2891 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2892
2893
2894 if (rc == -ELINKCONG) {
2895 tipc_dest_push(&tsk->cong_links, pnode, 0);
2896 tsk->cong_link_cnt = 1;
2897 }
2898 sock_put(sk);
2899 }
2900
2901 static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2902 {
2903 struct sock *sk = &tsk->sk;
2904 struct net *net = sock_net(sk);
2905 struct tipc_socket_addr skaddr;
2906 struct publication *p;
2907 u32 key;
2908
2909 if (tipc_sk_connected(sk))
2910 return -EINVAL;
2911 key = tsk->portid + tsk->pub_count + 1;
2912 if (key == tsk->portid)
2913 return -EADDRINUSE;
2914 skaddr.ref = tsk->portid;
2915 skaddr.node = tipc_own_addr(net);
2916 p = tipc_nametbl_publish(net, ua, &skaddr, key);
2917 if (unlikely(!p))
2918 return -EINVAL;
2919
2920 list_add(&p->binding_sock, &tsk->publications);
2921 tsk->pub_count++;
2922 tsk->published = true;
2923 return 0;
2924 }
2925
2926 static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2927 {
2928 struct net *net = sock_net(&tsk->sk);
2929 struct publication *safe, *p;
2930 struct tipc_uaddr _ua;
2931 int rc = -EINVAL;
2932
2933 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2934 if (!ua) {
2935 tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
2936 p->sr.type, p->sr.lower, p->sr.upper);
2937 tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
2938 continue;
2939 }
2940
2941 if (p->scope != ua->scope)
2942 continue;
2943 if (p->sr.type != ua->sr.type)
2944 continue;
2945 if (p->sr.lower != ua->sr.lower)
2946 continue;
2947 if (p->sr.upper != ua->sr.upper)
2948 break;
2949 tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
2950 rc = 0;
2951 break;
2952 }
2953 if (list_empty(&tsk->publications)) {
2954 tsk->published = 0;
2955 rc = 0;
2956 }
2957 return rc;
2958 }
2959
2960
2961
2962
2963 void tipc_sk_reinit(struct net *net)
2964 {
2965 struct tipc_net *tn = net_generic(net, tipc_net_id);
2966 struct rhashtable_iter iter;
2967 struct tipc_sock *tsk;
2968 struct tipc_msg *msg;
2969
2970 rhashtable_walk_enter(&tn->sk_rht, &iter);
2971
2972 do {
2973 rhashtable_walk_start(&iter);
2974
2975 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2976 sock_hold(&tsk->sk);
2977 rhashtable_walk_stop(&iter);
2978 lock_sock(&tsk->sk);
2979 msg = &tsk->phdr;
2980 msg_set_prevnode(msg, tipc_own_addr(net));
2981 msg_set_orignode(msg, tipc_own_addr(net));
2982 release_sock(&tsk->sk);
2983 rhashtable_walk_start(&iter);
2984 sock_put(&tsk->sk);
2985 }
2986
2987 rhashtable_walk_stop(&iter);
2988 } while (tsk == ERR_PTR(-EAGAIN));
2989
2990 rhashtable_walk_exit(&iter);
2991 }
2992
2993 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2994 {
2995 struct tipc_net *tn = net_generic(net, tipc_net_id);
2996 struct tipc_sock *tsk;
2997
2998 rcu_read_lock();
2999 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
3000 if (tsk)
3001 sock_hold(&tsk->sk);
3002 rcu_read_unlock();
3003
3004 return tsk;
3005 }
3006
3007 static int tipc_sk_insert(struct tipc_sock *tsk)
3008 {
3009 struct sock *sk = &tsk->sk;
3010 struct net *net = sock_net(sk);
3011 struct tipc_net *tn = net_generic(net, tipc_net_id);
3012 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
3013 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
3014
3015 while (remaining--) {
3016 portid++;
3017 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
3018 portid = TIPC_MIN_PORT;
3019 tsk->portid = portid;
3020 sock_hold(&tsk->sk);
3021 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3022 tsk_rht_params))
3023 return 0;
3024 sock_put(&tsk->sk);
3025 }
3026
3027 return -1;
3028 }
3029
3030 static void tipc_sk_remove(struct tipc_sock *tsk)
3031 {
3032 struct sock *sk = &tsk->sk;
3033 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
3034
3035 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3036 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
3037 __sock_put(sk);
3038 }
3039 }
3040
3041 static const struct rhashtable_params tsk_rht_params = {
3042 .nelem_hint = 192,
3043 .head_offset = offsetof(struct tipc_sock, node),
3044 .key_offset = offsetof(struct tipc_sock, portid),
3045 .key_len = sizeof(u32),
3046 .max_size = 1048576,
3047 .min_size = 256,
3048 .automatic_shrinking = true,
3049 };
3050
3051 int tipc_sk_rht_init(struct net *net)
3052 {
3053 struct tipc_net *tn = net_generic(net, tipc_net_id);
3054
3055 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
3056 }
3057
3058 void tipc_sk_rht_destroy(struct net *net)
3059 {
3060 struct tipc_net *tn = net_generic(net, tipc_net_id);
3061
3062
3063 synchronize_net();
3064
3065 rhashtable_destroy(&tn->sk_rht);
3066 }
3067
3068 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3069 {
3070 struct net *net = sock_net(&tsk->sk);
3071 struct tipc_group *grp = tsk->group;
3072 struct tipc_msg *hdr = &tsk->phdr;
3073 struct tipc_uaddr ua;
3074 int rc;
3075
3076 if (mreq->type < TIPC_RESERVED_TYPES)
3077 return -EACCES;
3078 if (mreq->scope > TIPC_NODE_SCOPE)
3079 return -EINVAL;
3080 if (mreq->scope != TIPC_NODE_SCOPE)
3081 mreq->scope = TIPC_CLUSTER_SCOPE;
3082 if (grp)
3083 return -EACCES;
3084 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3085 if (!grp)
3086 return -ENOMEM;
3087 tsk->group = grp;
3088 msg_set_lookup_scope(hdr, mreq->scope);
3089 msg_set_nametype(hdr, mreq->type);
3090 msg_set_dest_droppable(hdr, true);
3091 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
3092 mreq->type, mreq->instance, mreq->instance);
3093 tipc_nametbl_build_group(net, grp, &ua);
3094 rc = tipc_sk_publish(tsk, &ua);
3095 if (rc) {
3096 tipc_group_delete(net, grp);
3097 tsk->group = NULL;
3098 return rc;
3099 }
3100
3101 tsk->mc_method.rcast = true;
3102 tsk->mc_method.mandatory = true;
3103 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3104 return rc;
3105 }
3106
3107 static int tipc_sk_leave(struct tipc_sock *tsk)
3108 {
3109 struct net *net = sock_net(&tsk->sk);
3110 struct tipc_group *grp = tsk->group;
3111 struct tipc_uaddr ua;
3112 int scope;
3113
3114 if (!grp)
3115 return -EINVAL;
3116 ua.addrtype = TIPC_SERVICE_RANGE;
3117 tipc_group_self(grp, &ua.sr, &scope);
3118 ua.scope = scope;
3119 tipc_group_delete(net, grp);
3120 tsk->group = NULL;
3121 tipc_sk_withdraw(tsk, &ua);
3122 return 0;
3123 }
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
3139 sockptr_t ov, unsigned int ol)
3140 {
3141 struct sock *sk = sock->sk;
3142 struct tipc_sock *tsk = tipc_sk(sk);
3143 struct tipc_group_req mreq;
3144 u32 value = 0;
3145 int res = 0;
3146
3147 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3148 return 0;
3149 if (lvl != SOL_TIPC)
3150 return -ENOPROTOOPT;
3151
3152 switch (opt) {
3153 case TIPC_IMPORTANCE:
3154 case TIPC_SRC_DROPPABLE:
3155 case TIPC_DEST_DROPPABLE:
3156 case TIPC_CONN_TIMEOUT:
3157 case TIPC_NODELAY:
3158 if (ol < sizeof(value))
3159 return -EINVAL;
3160 if (copy_from_sockptr(&value, ov, sizeof(u32)))
3161 return -EFAULT;
3162 break;
3163 case TIPC_GROUP_JOIN:
3164 if (ol < sizeof(mreq))
3165 return -EINVAL;
3166 if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
3167 return -EFAULT;
3168 break;
3169 default:
3170 if (!sockptr_is_null(ov) || ol)
3171 return -EINVAL;
3172 }
3173
3174 lock_sock(sk);
3175
3176 switch (opt) {
3177 case TIPC_IMPORTANCE:
3178 res = tsk_set_importance(sk, value);
3179 break;
3180 case TIPC_SRC_DROPPABLE:
3181 if (sock->type != SOCK_STREAM)
3182 tsk_set_unreliable(tsk, value);
3183 else
3184 res = -ENOPROTOOPT;
3185 break;
3186 case TIPC_DEST_DROPPABLE:
3187 tsk_set_unreturnable(tsk, value);
3188 break;
3189 case TIPC_CONN_TIMEOUT:
3190 tipc_sk(sk)->conn_timeout = value;
3191 break;
3192 case TIPC_MCAST_BROADCAST:
3193 tsk->mc_method.rcast = false;
3194 tsk->mc_method.mandatory = true;
3195 break;
3196 case TIPC_MCAST_REPLICAST:
3197 tsk->mc_method.rcast = true;
3198 tsk->mc_method.mandatory = true;
3199 break;
3200 case TIPC_GROUP_JOIN:
3201 res = tipc_sk_join(tsk, &mreq);
3202 break;
3203 case TIPC_GROUP_LEAVE:
3204 res = tipc_sk_leave(tsk);
3205 break;
3206 case TIPC_NODELAY:
3207 tsk->nodelay = !!value;
3208 tsk_set_nagle(tsk);
3209 break;
3210 default:
3211 res = -EINVAL;
3212 }
3213
3214 release_sock(sk);
3215
3216 return res;
3217 }
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3233 char __user *ov, int __user *ol)
3234 {
3235 struct sock *sk = sock->sk;
3236 struct tipc_sock *tsk = tipc_sk(sk);
3237 struct tipc_service_range seq;
3238 int len, scope;
3239 u32 value;
3240 int res;
3241
3242 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3243 return put_user(0, ol);
3244 if (lvl != SOL_TIPC)
3245 return -ENOPROTOOPT;
3246 res = get_user(len, ol);
3247 if (res)
3248 return res;
3249
3250 lock_sock(sk);
3251
3252 switch (opt) {
3253 case TIPC_IMPORTANCE:
3254 value = tsk_importance(tsk);
3255 break;
3256 case TIPC_SRC_DROPPABLE:
3257 value = tsk_unreliable(tsk);
3258 break;
3259 case TIPC_DEST_DROPPABLE:
3260 value = tsk_unreturnable(tsk);
3261 break;
3262 case TIPC_CONN_TIMEOUT:
3263 value = tsk->conn_timeout;
3264
3265 break;
3266 case TIPC_NODE_RECVQ_DEPTH:
3267 value = 0;
3268 break;
3269 case TIPC_SOCK_RECVQ_DEPTH:
3270 value = skb_queue_len(&sk->sk_receive_queue);
3271 break;
3272 case TIPC_SOCK_RECVQ_USED:
3273 value = sk_rmem_alloc_get(sk);
3274 break;
3275 case TIPC_GROUP_JOIN:
3276 seq.type = 0;
3277 if (tsk->group)
3278 tipc_group_self(tsk->group, &seq, &scope);
3279 value = seq.type;
3280 break;
3281 default:
3282 res = -EINVAL;
3283 }
3284
3285 release_sock(sk);
3286
3287 if (res)
3288 return res;
3289
3290 if (len < sizeof(value))
3291 return -EINVAL;
3292
3293 if (copy_to_user(ov, &value, sizeof(value)))
3294 return -EFAULT;
3295
3296 return put_user(sizeof(value), ol);
3297 }
3298
3299 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3300 {
3301 struct net *net = sock_net(sock->sk);
3302 struct tipc_sioc_nodeid_req nr = {0};
3303 struct tipc_sioc_ln_req lnr;
3304 void __user *argp = (void __user *)arg;
3305
3306 switch (cmd) {
3307 case SIOCGETLINKNAME:
3308 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3309 return -EFAULT;
3310 if (!tipc_node_get_linkname(net,
3311 lnr.bearer_id & 0xffff, lnr.peer,
3312 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3313 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3314 return -EFAULT;
3315 return 0;
3316 }
3317 return -EADDRNOTAVAIL;
3318 case SIOCGETNODEID:
3319 if (copy_from_user(&nr, argp, sizeof(nr)))
3320 return -EFAULT;
3321 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3322 return -EADDRNOTAVAIL;
3323 if (copy_to_user(argp, &nr, sizeof(nr)))
3324 return -EFAULT;
3325 return 0;
3326 default:
3327 return -ENOIOCTLCMD;
3328 }
3329 }
3330
3331 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3332 {
3333 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3334 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3335 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3336
3337 tsk1->peer.family = AF_TIPC;
3338 tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
3339 tsk1->peer.scope = TIPC_NODE_SCOPE;
3340 tsk1->peer.addr.id.ref = tsk2->portid;
3341 tsk1->peer.addr.id.node = onode;
3342 tsk2->peer.family = AF_TIPC;
3343 tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
3344 tsk2->peer.scope = TIPC_NODE_SCOPE;
3345 tsk2->peer.addr.id.ref = tsk1->portid;
3346 tsk2->peer.addr.id.node = onode;
3347
3348 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3349 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3350 return 0;
3351 }
3352
3353
3354
3355 static const struct proto_ops msg_ops = {
3356 .owner = THIS_MODULE,
3357 .family = AF_TIPC,
3358 .release = tipc_release,
3359 .bind = tipc_bind,
3360 .connect = tipc_connect,
3361 .socketpair = tipc_socketpair,
3362 .accept = sock_no_accept,
3363 .getname = tipc_getname,
3364 .poll = tipc_poll,
3365 .ioctl = tipc_ioctl,
3366 .listen = sock_no_listen,
3367 .shutdown = tipc_shutdown,
3368 .setsockopt = tipc_setsockopt,
3369 .getsockopt = tipc_getsockopt,
3370 .sendmsg = tipc_sendmsg,
3371 .recvmsg = tipc_recvmsg,
3372 .mmap = sock_no_mmap,
3373 .sendpage = sock_no_sendpage
3374 };
3375
3376 static const struct proto_ops packet_ops = {
3377 .owner = THIS_MODULE,
3378 .family = AF_TIPC,
3379 .release = tipc_release,
3380 .bind = tipc_bind,
3381 .connect = tipc_connect,
3382 .socketpair = tipc_socketpair,
3383 .accept = tipc_accept,
3384 .getname = tipc_getname,
3385 .poll = tipc_poll,
3386 .ioctl = tipc_ioctl,
3387 .listen = tipc_listen,
3388 .shutdown = tipc_shutdown,
3389 .setsockopt = tipc_setsockopt,
3390 .getsockopt = tipc_getsockopt,
3391 .sendmsg = tipc_send_packet,
3392 .recvmsg = tipc_recvmsg,
3393 .mmap = sock_no_mmap,
3394 .sendpage = sock_no_sendpage
3395 };
3396
3397 static const struct proto_ops stream_ops = {
3398 .owner = THIS_MODULE,
3399 .family = AF_TIPC,
3400 .release = tipc_release,
3401 .bind = tipc_bind,
3402 .connect = tipc_connect,
3403 .socketpair = tipc_socketpair,
3404 .accept = tipc_accept,
3405 .getname = tipc_getname,
3406 .poll = tipc_poll,
3407 .ioctl = tipc_ioctl,
3408 .listen = tipc_listen,
3409 .shutdown = tipc_shutdown,
3410 .setsockopt = tipc_setsockopt,
3411 .getsockopt = tipc_getsockopt,
3412 .sendmsg = tipc_sendstream,
3413 .recvmsg = tipc_recvstream,
3414 .mmap = sock_no_mmap,
3415 .sendpage = sock_no_sendpage
3416 };
3417
3418 static const struct net_proto_family tipc_family_ops = {
3419 .owner = THIS_MODULE,
3420 .family = AF_TIPC,
3421 .create = tipc_sk_create
3422 };
3423
3424 static struct proto tipc_proto = {
3425 .name = "TIPC",
3426 .owner = THIS_MODULE,
3427 .obj_size = sizeof(struct tipc_sock),
3428 .sysctl_rmem = sysctl_tipc_rmem
3429 };
3430
3431
3432
3433
3434
3435
3436 int tipc_socket_init(void)
3437 {
3438 int res;
3439
3440 res = proto_register(&tipc_proto, 1);
3441 if (res) {
3442 pr_err("Failed to register TIPC protocol type\n");
3443 goto out;
3444 }
3445
3446 res = sock_register(&tipc_family_ops);
3447 if (res) {
3448 pr_err("Failed to register TIPC socket type\n");
3449 proto_unregister(&tipc_proto);
3450 goto out;
3451 }
3452 out:
3453 return res;
3454 }
3455
3456
3457
3458
3459 void tipc_socket_stop(void)
3460 {
3461 sock_unregister(tipc_family_ops.family);
3462 proto_unregister(&tipc_proto);
3463 }
3464
3465
3466 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3467 {
3468 u32 peer_node, peer_port;
3469 u32 conn_type, conn_instance;
3470 struct nlattr *nest;
3471
3472 peer_node = tsk_peer_node(tsk);
3473 peer_port = tsk_peer_port(tsk);
3474 conn_type = msg_nametype(&tsk->phdr);
3475 conn_instance = msg_nameinst(&tsk->phdr);
3476 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3477 if (!nest)
3478 return -EMSGSIZE;
3479
3480 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3481 goto msg_full;
3482 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3483 goto msg_full;
3484
3485 if (tsk->conn_addrtype != 0) {
3486 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3487 goto msg_full;
3488 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
3489 goto msg_full;
3490 if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
3491 goto msg_full;
3492 }
3493 nla_nest_end(skb, nest);
3494
3495 return 0;
3496
3497 msg_full:
3498 nla_nest_cancel(skb, nest);
3499
3500 return -EMSGSIZE;
3501 }
3502
3503 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3504 *tsk)
3505 {
3506 struct net *net = sock_net(skb->sk);
3507 struct sock *sk = &tsk->sk;
3508
3509 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3510 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3511 return -EMSGSIZE;
3512
3513 if (tipc_sk_connected(sk)) {
3514 if (__tipc_nl_add_sk_con(skb, tsk))
3515 return -EMSGSIZE;
3516 } else if (!list_empty(&tsk->publications)) {
3517 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3518 return -EMSGSIZE;
3519 }
3520 return 0;
3521 }
3522
3523
3524 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3525 struct tipc_sock *tsk)
3526 {
3527 struct nlattr *attrs;
3528 void *hdr;
3529
3530 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3531 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3532 if (!hdr)
3533 goto msg_cancel;
3534
3535 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3536 if (!attrs)
3537 goto genlmsg_cancel;
3538
3539 if (__tipc_nl_add_sk_info(skb, tsk))
3540 goto attr_msg_cancel;
3541
3542 nla_nest_end(skb, attrs);
3543 genlmsg_end(skb, hdr);
3544
3545 return 0;
3546
3547 attr_msg_cancel:
3548 nla_nest_cancel(skb, attrs);
3549 genlmsg_cancel:
3550 genlmsg_cancel(skb, hdr);
3551 msg_cancel:
3552 return -EMSGSIZE;
3553 }
3554
3555 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3556 int (*skb_handler)(struct sk_buff *skb,
3557 struct netlink_callback *cb,
3558 struct tipc_sock *tsk))
3559 {
3560 struct rhashtable_iter *iter = (void *)cb->args[4];
3561 struct tipc_sock *tsk;
3562 int err;
3563
3564 rhashtable_walk_start(iter);
3565 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3566 if (IS_ERR(tsk)) {
3567 err = PTR_ERR(tsk);
3568 if (err == -EAGAIN) {
3569 err = 0;
3570 continue;
3571 }
3572 break;
3573 }
3574
3575 sock_hold(&tsk->sk);
3576 rhashtable_walk_stop(iter);
3577 lock_sock(&tsk->sk);
3578 err = skb_handler(skb, cb, tsk);
3579 if (err) {
3580 release_sock(&tsk->sk);
3581 sock_put(&tsk->sk);
3582 goto out;
3583 }
3584 release_sock(&tsk->sk);
3585 rhashtable_walk_start(iter);
3586 sock_put(&tsk->sk);
3587 }
3588 rhashtable_walk_stop(iter);
3589 out:
3590 return skb->len;
3591 }
3592 EXPORT_SYMBOL(tipc_nl_sk_walk);
3593
3594 int tipc_dump_start(struct netlink_callback *cb)
3595 {
3596 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3597 }
3598 EXPORT_SYMBOL(tipc_dump_start);
3599
3600 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3601 {
3602
3603 struct rhashtable_iter *iter = (void *)cb->args[4];
3604 struct tipc_net *tn = tipc_net(net);
3605
3606 if (!iter) {
3607 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3608 if (!iter)
3609 return -ENOMEM;
3610
3611 cb->args[4] = (long)iter;
3612 }
3613
3614 rhashtable_walk_enter(&tn->sk_rht, iter);
3615 return 0;
3616 }
3617
3618 int tipc_dump_done(struct netlink_callback *cb)
3619 {
3620 struct rhashtable_iter *hti = (void *)cb->args[4];
3621
3622 rhashtable_walk_exit(hti);
3623 kfree(hti);
3624 return 0;
3625 }
3626 EXPORT_SYMBOL(tipc_dump_done);
3627
3628 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3629 struct tipc_sock *tsk, u32 sk_filter_state,
3630 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3631 {
3632 struct sock *sk = &tsk->sk;
3633 struct nlattr *attrs;
3634 struct nlattr *stat;
3635
3636
3637 if (!(sk_filter_state & (1 << sk->sk_state)))
3638 return 0;
3639
3640 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3641 if (!attrs)
3642 goto msg_cancel;
3643
3644 if (__tipc_nl_add_sk_info(skb, tsk))
3645 goto attr_msg_cancel;
3646
3647 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3648 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3649 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3650 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3651 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3652 sock_i_uid(sk))) ||
3653 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3654 tipc_diag_gen_cookie(sk),
3655 TIPC_NLA_SOCK_PAD))
3656 goto attr_msg_cancel;
3657
3658 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3659 if (!stat)
3660 goto attr_msg_cancel;
3661
3662 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3663 skb_queue_len(&sk->sk_receive_queue)) ||
3664 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3665 skb_queue_len(&sk->sk_write_queue)) ||
3666 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3667 atomic_read(&sk->sk_drops)))
3668 goto stat_msg_cancel;
3669
3670 if (tsk->cong_link_cnt &&
3671 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3672 goto stat_msg_cancel;
3673
3674 if (tsk_conn_cong(tsk) &&
3675 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3676 goto stat_msg_cancel;
3677
3678 nla_nest_end(skb, stat);
3679
3680 if (tsk->group)
3681 if (tipc_group_fill_sock_diag(tsk->group, skb))
3682 goto stat_msg_cancel;
3683
3684 nla_nest_end(skb, attrs);
3685
3686 return 0;
3687
3688 stat_msg_cancel:
3689 nla_nest_cancel(skb, stat);
3690 attr_msg_cancel:
3691 nla_nest_cancel(skb, attrs);
3692 msg_cancel:
3693 return -EMSGSIZE;
3694 }
3695 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3696
3697 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3698 {
3699 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3700 }
3701
3702
3703 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3704 struct netlink_callback *cb,
3705 struct publication *publ)
3706 {
3707 void *hdr;
3708 struct nlattr *attrs;
3709
3710 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3711 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3712 if (!hdr)
3713 goto msg_cancel;
3714
3715 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3716 if (!attrs)
3717 goto genlmsg_cancel;
3718
3719 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3720 goto attr_msg_cancel;
3721 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
3722 goto attr_msg_cancel;
3723 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
3724 goto attr_msg_cancel;
3725 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
3726 goto attr_msg_cancel;
3727
3728 nla_nest_end(skb, attrs);
3729 genlmsg_end(skb, hdr);
3730
3731 return 0;
3732
3733 attr_msg_cancel:
3734 nla_nest_cancel(skb, attrs);
3735 genlmsg_cancel:
3736 genlmsg_cancel(skb, hdr);
3737 msg_cancel:
3738 return -EMSGSIZE;
3739 }
3740
3741
3742 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3743 struct netlink_callback *cb,
3744 struct tipc_sock *tsk, u32 *last_publ)
3745 {
3746 int err;
3747 struct publication *p;
3748
3749 if (*last_publ) {
3750 list_for_each_entry(p, &tsk->publications, binding_sock) {
3751 if (p->key == *last_publ)
3752 break;
3753 }
3754 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3755
3756
3757
3758
3759
3760
3761 cb->prev_seq = 1;
3762 *last_publ = 0;
3763 return -EPIPE;
3764 }
3765 } else {
3766 p = list_first_entry(&tsk->publications, struct publication,
3767 binding_sock);
3768 }
3769
3770 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3771 err = __tipc_nl_add_sk_publ(skb, cb, p);
3772 if (err) {
3773 *last_publ = p->key;
3774 return err;
3775 }
3776 }
3777 *last_publ = 0;
3778
3779 return 0;
3780 }
3781
3782 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3783 {
3784 int err;
3785 u32 tsk_portid = cb->args[0];
3786 u32 last_publ = cb->args[1];
3787 u32 done = cb->args[2];
3788 struct net *net = sock_net(skb->sk);
3789 struct tipc_sock *tsk;
3790
3791 if (!tsk_portid) {
3792 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
3793 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3794
3795 if (!attrs[TIPC_NLA_SOCK])
3796 return -EINVAL;
3797
3798 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3799 attrs[TIPC_NLA_SOCK],
3800 tipc_nl_sock_policy, NULL);
3801 if (err)
3802 return err;
3803
3804 if (!sock[TIPC_NLA_SOCK_REF])
3805 return -EINVAL;
3806
3807 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3808 }
3809
3810 if (done)
3811 return 0;
3812
3813 tsk = tipc_sk_lookup(net, tsk_portid);
3814 if (!tsk)
3815 return -EINVAL;
3816
3817 lock_sock(&tsk->sk);
3818 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3819 if (!err)
3820 done = 1;
3821 release_sock(&tsk->sk);
3822 sock_put(&tsk->sk);
3823
3824 cb->args[0] = tsk_portid;
3825 cb->args[1] = last_publ;
3826 cb->args[2] = done;
3827
3828 return skb->len;
3829 }
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842 bool tipc_sk_filtering(struct sock *sk)
3843 {
3844 struct tipc_sock *tsk;
3845 struct publication *p;
3846 u32 _port, _sktype, _type, _lower, _upper;
3847 u32 type = 0, lower = 0, upper = 0;
3848
3849 if (!sk)
3850 return true;
3851
3852 tsk = tipc_sk(sk);
3853
3854 _port = sysctl_tipc_sk_filter[0];
3855 _sktype = sysctl_tipc_sk_filter[1];
3856 _type = sysctl_tipc_sk_filter[2];
3857 _lower = sysctl_tipc_sk_filter[3];
3858 _upper = sysctl_tipc_sk_filter[4];
3859
3860 if (!_port && !_sktype && !_type && !_lower && !_upper)
3861 return true;
3862
3863 if (_port)
3864 return (_port == tsk->portid);
3865
3866 if (_sktype && _sktype != sk->sk_type)
3867 return false;
3868
3869 if (tsk->published) {
3870 p = list_first_entry_or_null(&tsk->publications,
3871 struct publication, binding_sock);
3872 if (p) {
3873 type = p->sr.type;
3874 lower = p->sr.lower;
3875 upper = p->sr.upper;
3876 }
3877 }
3878
3879 if (!tipc_sk_type_connectionless(sk)) {
3880 type = msg_nametype(&tsk->phdr);
3881 lower = msg_nameinst(&tsk->phdr);
3882 upper = lower;
3883 }
3884
3885 if ((_type && _type != type) || (_lower && _lower != lower) ||
3886 (_upper && _upper != upper))
3887 return false;
3888
3889 return true;
3890 }
3891
3892 u32 tipc_sock_get_portid(struct sock *sk)
3893 {
3894 return (sk) ? (tipc_sk(sk))->portid : 0;
3895 }
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3907 {
3908 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3909 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3910 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3911
3912 return (qsize > lim * 90 / 100);
3913 }
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3925 {
3926 unsigned int lim = rcvbuf_limit(sk, skb);
3927 unsigned int qsize = sk_rmem_alloc_get(sk);
3928
3929 return (qsize > lim * 90 / 100);
3930 }
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3944 {
3945 int i = 0;
3946 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3947 u32 conn_type, conn_instance;
3948 struct tipc_sock *tsk;
3949 struct publication *p;
3950 bool tsk_connected;
3951
3952 if (!sk) {
3953 i += scnprintf(buf, sz, "sk data: (null)\n");
3954 return i;
3955 }
3956
3957 tsk = tipc_sk(sk);
3958 tsk_connected = !tipc_sk_type_connectionless(sk);
3959
3960 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3961 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3962 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3963 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3964 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3965 if (tsk_connected) {
3966 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3967 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3968 conn_type = msg_nametype(&tsk->phdr);
3969 conn_instance = msg_nameinst(&tsk->phdr);
3970 i += scnprintf(buf + i, sz - i, " %u", conn_type);
3971 i += scnprintf(buf + i, sz - i, " %u", conn_instance);
3972 }
3973 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3974 if (tsk->published) {
3975 p = list_first_entry_or_null(&tsk->publications,
3976 struct publication, binding_sock);
3977 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
3978 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
3979 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
3980 }
3981 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3982 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3983 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3984 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3985 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3986 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3987 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3988 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3989 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3990 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3991 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3992 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3993 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3994 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3995
3996 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3997 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3998 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3999 }
4000
4001 if (dqueues & TIPC_DUMP_SK_RCVQ) {
4002 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
4003 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
4004 }
4005
4006 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
4007 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
4008 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
4009 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
4010 i += scnprintf(buf + i, sz - i, " tail ");
4011 i += tipc_skb_dump(sk->sk_backlog.tail, false,
4012 buf + i);
4013 }
4014 }
4015
4016 return i;
4017 }