// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;
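	/* When the skb owns the memory we free it from consume_skb path. */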
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
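	/* Adjust copybreak if it falls into the trimmed part of last buf */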
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;
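	/* If we trim data a full sg elem before curr pointer update
	 * copybreak and current so that any future copy operations
	 * start at new copy location. However trimmed data that has not
	 * yet been used in a copy op does not require an update.
	 */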
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
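		/* The zerocopy pages were appended at sg.end; move curr past
		 * them and clear copybreak so any memcopy mixed in via
		 * sk_msg_memcopy_from_iter() starts in a fresh element rather
		 * than writing into these pages.
		 */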
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
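	/* On failure, rewind the iterator by however much we consumed so
	 * the caller observes an unmodified iov and can fall back to copying.
	 */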
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);

		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
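/* Receive sk_msg from psock->ingress_msg to @msg. */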
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
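				/* Lets not optimize peek case if copy_page_to_iter
				 * didn't copy the entire length lets just break.
				 */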
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(void)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg();
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
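		/* skb linearize may fail with ENOMEM, but lets simply try again
		 * later if this happens. Under memory pressure we don't want to
		 * drop the skb. We need to linearize the skb so that the mapping
		 * in skb_to_sgvec can not error.
		 */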
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;
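	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner update since we already did it
	 * on the receive path.
	 */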
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;
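	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * via consume_skb() once the msg has been copied to user space and
	 * freed.
	 */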
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}
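/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r()
 * since the skb is already accounted for on this socket.
 */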
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = alloc_sk_msg();
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}

				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
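	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the free'd skb.
	 */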
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
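	/* No longer used in a sock map. */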
	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock, false);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
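	/* This error is a buggy BPF program, it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */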
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
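	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */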
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			skb_bpf_redirect_clear(skb);
			goto out_free;
		}

		skb_bpf_set_ingress(skb);
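		/* If the ingress queue is empty we can submit directly into
		 * the msg queue; if it is not empty we must go through the
		 * workqueue, otherwise we may get out-of-order data. Errors
		 * from sk_psock_skb_ingress_self() are handled by retrying
		 * later from the workqueue.
		 */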
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		if (ret == SK_PASS)
			skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}
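/* Called with socket lock held. */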
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
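	/* Parser has been stopped */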
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif

static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	skb_get(skb);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
		return;
	sock->ops->read_skb(sk, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}