// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

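/*
 * Per-request state for IORING_OP_MSG_RING, decoded from the SQE in
 * io_msg_ring_prep(): user_data and len are posted to the target
 * ring's CQ, src_fd/dst_fd name fixed file table slots for
 * IORING_MSG_SEND_FD, and flags holds IORING_MSG_RING_* modifiers.
 */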
struct io_msg {
	struct file			*file;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	u32 dst_fd;
	u32 flags;
};

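/*
 * IORING_MSG_DATA: post a CQE carrying msg->user_data (as user_data)
 * and msg->len (as res) to the target ring. The fd and flags fields
 * are unused for this sub-command and must be zero. Fails with
 * -EOVERFLOW if the CQE could not be posted.
 */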
static int io_msg_ring_data(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;

	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
		return 0;

	return -EOVERFLOW;
}

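/*
 * Undo io_double_lock_ctx(): always drop the target's uring_lock, and
 * drop our own as well if we took it there (issue context was unlocked).
 */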
static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
				 struct io_ring_ctx *octx,
				 unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
	mutex_unlock(&octx->uring_lock);
}

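/*
 * Take both rings' uring_locks without deadlocking against another
 * task locking the same pair in the opposite order.
 */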
static int io_double_lock_ctx(struct io_ring_ctx *ctx,
			      struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}

	/* Always grab smallest value ctx first. We know ctx != octx. */
	if (ctx < octx) {
		mutex_lock(&ctx->uring_lock);
		mutex_lock(&octx->uring_lock);
	} else {
		mutex_lock(&octx->uring_lock);
		mutex_lock(&ctx->uring_lock);
	}

	return 0;
}

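/*
 * IORING_MSG_SEND_FD: duplicate the fixed file in slot msg->src_fd of
 * the sending ring into the target ring's fixed file table at slot
 * msg->dst_fd. Both rings must be locked for the transfer; unless
 * IORING_MSG_RING_CQE_SKIP was set, a CQE is then posted to the target
 * ring to announce the new file.
 */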
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long file_ptr;
	struct file *src_file;
	int ret;

	if (target_ctx == ctx)
		return -EINVAL;

	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
	if (unlikely(ret))
		return ret;

	ret = -EBADF;
	if (unlikely(msg->src_fd >= ctx->nr_user_files))
		goto out_unlock;

	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
	src_file = (struct file *) (file_ptr & FFS_MASK);
	get_file(src_file);

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0) {
		fput(src_file);
		goto out_unlock;
	}

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;

	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
	return ret;
}

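/*
 * Decode the SQE for IORING_OP_MSG_RING. The opcode reuses existing
 * SQE fields: off carries the user_data to post, addr the sub-command
 * (IORING_MSG_DATA or IORING_MSG_SEND_FD), and addr3/file_index the
 * source and destination fixed file slots.
 */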
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}

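/*
 * Issue IORING_OP_MSG_RING: req->file must be another io_uring
 * instance (the target ring); dispatch on the sub-command in msg->cmd.
 */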
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	/* put file to avoid an attempt to IOPOLL the req */
	if (!(req->flags & REQ_F_FIXED_FILE))
		io_put_file(req->file);
	req->file = NULL;
	return IOU_OK;
}
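
/*
 * Illustrative userspace sketch (not part of this file), assuming
 * liburing's io_uring_prep_msg_ring() helper: one ring nudges another
 * by posting a CQE into it via IORING_MSG_DATA.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);
 *
 *	// target_ring.ring_fd is the fd of the *receiving* ring;
 *	// 0x10 and 0xcafe arrive there as cqe->res and cqe->user_data
 *	io_uring_prep_msg_ring(sqe, target_ring.ring_fd, 0x10, 0xcafe, 0);
 *	io_uring_submit(&src_ring);
 */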