// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "uring_cmd.h"

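/*
 * Task-work callback: runs in the context of the submitting task and
 * forwards completion handling to the callback registered via
 * io_uring_cmd_complete_in_task().
 */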
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	ioucmd->task_work_cb(ioucmd);
}

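/*
 * Schedule @task_work_cb to run in task context via io_uring's task work
 * mechanism.  Drivers use this to defer ->uring_cmd() completion handling
 * into task work rather than completing the request in place.
 */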
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);

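/*
 * Stash the two extra 64-bit result fields used by big (32-byte) CQEs so
 * they can be copied into the CQE when the request is completed.
 */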
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

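/*
 * Called by the driver to post a completion for a previously issued
 * command.  @res2 is only meaningful on rings set up with
 * IORING_SETUP_CQE32, where it is stored in the extra CQE space.
 */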
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	__io_req_complete(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

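/*
 * Copy the SQE's command payload into the request's async data so the SQE
 * can be reused while the command is retried or executed asynchronously.
 * The payload is 16 bytes, or 80 bytes on IORING_SETUP_SQE128 rings.
 */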
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	return 0;
}

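/*
 * Prep at submission time: reject SQE fields that have no meaning for
 * uring_cmd and record the command opcode and the location of its payload.
 */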
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->rw_flags || sqe->__pad1)
		return -EINVAL;
	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

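/*
 * Issue the command by calling into the file's ->uring_cmd() handler.
 * Ring setup flags are translated into issue_flags so the handler knows
 * about SQE128/CQE32/IOPOLL.  -EAGAIN from the handler triggers an async
 * retry (after copying the payload out of the SQE), -EIOCBQUEUED means the
 * handler will complete the request later via io_uring_cmd_done(), and any
 * other return value records the result and completes inline.
 */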
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!req->file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL)
		issue_flags |= IO_URING_F_IOPOLL;

	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}