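/* net/tipc/msg.c: TIPC message header routines */
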
#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
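
/**
 * tipc_buf_acquire - create a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size,
 * or NULL if allocation fails.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */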
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
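
/**
 * tipc_msg_init - initialize a TIPC message header
 * @own_node: originating node address
 * @m: header to be initialized; zeroed before the fields below are set
 * @user: message user type
 * @type: message type
 * @hsize: header size, in bytes
 * @dnode: destination node address (set only for headers > SHORT_H_SIZE)
 */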
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}
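
/**
 * tipc_msg_create - create a buffer containing a fully initialized header
 * @user: message user type
 * @type: message type
 * @hdr_sz: header size, in bytes
 * @data_sz: data size, in bytes (the data area is sized but left unfilled)
 * @dnode: destination node address
 * @onode: originating node address
 * @dport: destination port
 * @oport: originating port
 * @errcode: error code to be set in the header, if any
 *
 * Return: a new buffer, or NULL if allocation fails.
 */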
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}
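
/**
 * tipc_buf_append - append a fragment buffer to a message reassembly chain
 * @headbuf: chain head; set by the first fragment and replaced by the
 *           reassembled message once the last fragment has arrived
 * @buf: incoming fragment; consumed by this call
 *
 * Note: both buffers are freed and both pointers set to NULL on failure.
 *
 * Return: 1 if reassembly is complete and *buf holds the validated message,
 * otherwise 0.
 */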
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
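
/**
 * tipc_msg_append - append data to the tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended, or an errno value
 */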
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
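
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */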
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;
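
	/* Ensure that flow control ratio condition is satisfied */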
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
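
/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */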
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;

		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);

		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;

		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
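
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: message header, to be prepended to data
 * @m: user message
 * @offset: buffer offset for fragmented messages
 * @dsz: total length of user data
 * @pktmax: max packet size that can be used
 * @list: buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call made for the one-page MTU fallback is safe,
 * since it can logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */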
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);
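
	/* No fragmentation needed? */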
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);
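
		/* Fall back to smaller MTU if node local message */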
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz,
					    one_page_mtu, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}
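
	/* Prepare reusable fragment header */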
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));
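
	/* Prepare first fragment */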
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;
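
		/* Prepare new fragment: */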
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
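
/**
 * tipc_msg_bundle - append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer, to which msg is to be appended
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */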
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = BUF_ALIGN(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}
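
/**
 * tipc_msg_try_bundle - try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is potentially in bundle, otherwise
 * "false"
 */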
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;
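
	/* First, check if the new buffer is suitable for bundling */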
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;
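
	/* Ok, but the last/target buffer can be empty? */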
	if (unlikely(!tskb))
		return true;
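
	/* Is it a bundle already? If yes, check the bundle's size */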
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}
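
	/* Make a new bundle of the two messages if possible */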
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
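
/**
 * tipc_msg_extract - extract bundled inner packet from buffer
 * @skb: buffer to be extracted from
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted;
 *       returns position of next msg
 *
 * Note: the outer buffer @skb is consumed when no more messages can be
 * extracted from it.
 *
 * Return: true when there is an extracted buffer, otherwise false
 */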
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += BUF_ALIGN(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
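
/**
 * tipc_msg_reverse - swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 *
 * Replaces the consumed buffer with a new one when successful.
 * Return: true if success, otherwise false
 */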
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;
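
	/* Never return SHORT header */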
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;
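
	/* Don't return data along with SYN+, - sender has a clone */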
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;
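
	/* Allocate new buffer to return */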
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
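
	/* Build reverse header in new buffer */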
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}
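
/**
 * tipc_msg_skb_clone - clone all buffers of a message queue into another queue
 * @msg: queue of buffers to be cloned
 * @cpy: queue the clones are appended to
 *
 * Return: true on success, false (with @cpy purged) if any clone fails.
 */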
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}
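
/**
 * tipc_msg_lookup_dest - try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message; not consumed
 * @err: error code to be used by caller if lookup fails
 *
 * Return: true if a destination is found, false otherwise
 */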
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}
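
/* tipc_msg_assemble - assemble chain of fragments into one message */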
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}
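
/* tipc_msg_reassemble - clone a buffer chain of fragments and
 *                       reassemble the clones into one message
 */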
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;
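
	/* Copy header if single buffer */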
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}
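
	/* Clone all fragments and reassemble */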
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}
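
/* tipc_msg_pskb_copy - copy a buffer chain, setting the destination node
 * of each copy to @dst
 */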
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}
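
/**
 * __tipc_skb_queue_sorted - add buffer into queue sorted by sequence number
 * @list: queue to be sorted into
 * @seqno: sequence number of @skb
 * @skb: buffer to be queued
 *
 * Return: true if the buffer was queued, false (and the buffer is freed)
 * if a buffer with the same sequence number is already present.
 */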
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
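
/* tipc_skb_reject - reverse the message and, if successful, queue it on
 * @xmitq for transmission back to the sender
 */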
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}