
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			msg_flags;
	unsigned			flags;
	size_t				len;
	size_t				done_io;
};

struct io_sendzc {
	struct file			*file;
	void __user			*buf;
	size_t				len;
	unsigned			msg_flags;
	unsigned			flags;
	unsigned			addr_len;
	void __user			*addr;
	size_t				done_io;
	struct io_kiocb			*notif;
};

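/*
 * Both flags are set once a multishot request has been armed with poll;
 * used below to decide whether a blocked attempt should skip posting a
 * completion, because the poll handler will re-issue the request.
 */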
#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

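/*
 * A short transfer is only worth retrying when MSG_WAITALL was asked for,
 * and only on connection-oriented sockets: on a datagram socket, a retry
 * would not continue the same message.
 */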
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

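/*
 * Instead of freeing the async msghdr, stash it in the per-ring
 * netmsg_cache so the next request can reuse it. Only done when the ring
 * lock is held (i.e. not IO_URING_F_UNLOCKED), as the cache is not
 * otherwise synchronized.
 */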
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
						      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

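/*
 * Move the on-stack msghdr into async data so the request can be retried
 * out of line. Returns -EAGAIN on success so the caller bounces the
 * request to the async path, or -ENOMEM if no async data could be
 * allocated.
 */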
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_recvmsg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, point the iter at the copied one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_sendzc_prep_async(struct io_kiocb *req)
{
	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	io = req->async_data;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr,
			       unsigned int issue_flags)
{
	struct io_async_msghdr *io;

	if (!addr || req_has_async_data(req))
		return -EAGAIN;
	if (io_alloc_async_data(req))
		return -ENOMEM;
	io = req->async_data;
	memcpy(&io->addr, addr, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	}

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
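
/*
 * Userspace sketch of arming a multishot receive with provided buffers
 * (illustrative only, using liburing helpers; not part of this file.
 * BGID is a hypothetical registered buffer group id):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *
 * Each completion then carries IORING_CQE_F_MORE until the kernel stops
 * the multishot, at which point it must be re-armed.
 */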

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * The aux CQE could not be posted (overflow is not allowed
		 * here, so a full CQ ring fails the post): fall through
		 * and end the multishot with the current result rather
		 * than dropping it.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

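/*
 * A multishot recvmsg carves the selected buffer up as follows (derived
 * from the offset math below):
 *
 *	| io_uring_recvmsg_out | name (namelen) | control (controllen) | payload |
 *
 * The header, name and control areas are filled in by
 * io_recvmsg_multishot(); the socket payload lands after them.
 */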
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

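/*
 * For a multishot request, each successful receive posts an aux CQE with
 * IORING_CQE_F_MORE and jumps back to retry_multishot to pick a fresh
 * provided buffer; the loop ends on error, on EOF, or when posting the
 * aux CQE fails.
 */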
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
			      len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					      IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);

		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

void io_sendzc_cleanup(struct io_kiocb *req)
{
	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);

	zc->notif->flags |= REQ_F_CQE_SKIP;
	io_notif_flush(zc->notif);
	zc->notif = NULL;
}

int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
	    READ_ONCE(sqe->__pad3[0]))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	zc->addr_len = READ_ONCE(sqe->addr_len);
	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

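/*
 * Fill the skb directly with the pages backing the source bvec so the
 * send is truly zero-copy: no data is copied into kernel memory, the
 * frags reference the user pages (SKBFL_MANAGED_FRAG_REFS). Falls back
 * to __zerocopy_sg_from_iter() when the skb or iterator can't be
 * managed this way.
 */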
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!shinfo->nr_frags)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;

	if (!skb_zcopy_managed(skb) || !iov_iter_is_bvec(from)) {
		skb_zcopy_downgrade_managed(skb);
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
	}

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

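/*
 * Zero-copy send: the pages are handed to the network stack via
 * msg_ubuf/io_sg_from_iter and must stay stable until the notification
 * CQE (IORING_CQE_F_NOTIF) fires. A userspace sketch with liburing
 * (illustrative only, not part of this file):
 *
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *
 * A successful send posts one CQE with IORING_CQE_F_MORE carrying the
 * byte count, then a second one flagged IORING_CQE_F_NOTIF once buf may
 * be reused.
 */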
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address, *addr = NULL;
	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags, cflags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = addr = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
			addr = &__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, addr, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	msg.sg_from_iter = io_sg_from_iter;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, addr, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, addr, issue_flags);
		}
		if (ret < 0 && !zc->done_io)
			zc->notif->flags |= REQ_F_CQE_SKIP;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	io_notif_flush(zc->notif);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
	io_req_set_res(req, ret, cflags);
	return IOU_OK;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

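/*
 * For IORING_ACCEPT_MULTISHOT, each accepted connection is posted as an
 * aux CQE with IORING_CQE_F_MORE and we loop back to accept again; a
 * final CQE without the flag terminates the multishot.
 */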
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if multishot accept is still armed with poll, the
			 * poll handler will retry it once the socket is
			 * ready, so don't post a CQE for this -EAGAIN
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

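/*
 * A nonblocking connect that returns -EAGAIN or -EINPROGRESS is parked
 * in async data with the already-copied address, so the retry can
 * re-issue __sys_connect_file() without touching user memory again.
 */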
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif