// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>
#include "nvme.h"

static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
        if (in_compat_syscall())
                ptrval = (compat_uptr_t)ptrval;
        return (void __user *)ptrval;
}

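/*
 * Bounce user metadata into a kernel buffer and attach it to @bio as a
 * single-vector integrity payload.  For writes the payload is copied from
 * user space here; for reads it is copied back by nvme_finish_user_metadata().
 * Returns the kernel buffer on success or an ERR_PTR() on failure.
 */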
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
                unsigned len, u32 seed, bool write)
{
        struct bio_integrity_payload *bip;
        int ret = -ENOMEM;
        void *buf;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                goto out;

        ret = -EFAULT;
        if (write && copy_from_user(buf, ubuf, len))
                goto out_free_meta;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto out_free_meta;
        }

        bip->bip_iter.bi_size = len;
        bip->bip_iter.bi_sector = seed;
        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                        offset_in_page(buf));
        if (ret == len)
                return buf;
        ret = -ENOMEM;
out_free_meta:
        kfree(buf);
out:
        return ERR_PTR(ret);
}

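/*
 * Copy the metadata bounce buffer back to user space after a successful
 * device-to-host (REQ_OP_DRV_IN) command and free it in all cases.
 */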
static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
                void *meta, unsigned len, int ret)
{
        if (!ret && req_op(req) == REQ_OP_DRV_IN &&
            copy_to_user(ubuf, meta, len))
                ret = -EFAULT;
        kfree(meta);
        return ret;
}

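/*
 * Allocate and set up a passthrough request: map the user data buffer
 * (interpreted as an iovec array when @vec is set) and attach user metadata
 * through nvme_add_user_metadata() when a metadata buffer and length are
 * supplied for a namespace request.  On success the caller owns the request
 * and, via *metap, the metadata bounce buffer.
 */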
static struct request *nvme_alloc_user_request(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, void **metap, unsigned timeout, bool vec,
                blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
{
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
        struct block_device *bdev = ns ? ns->disk->part0 : NULL;
        struct request *req;
        struct bio *bio = NULL;
        void *meta = NULL;
        int ret;

        req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
        if (IS_ERR(req))
                return req;
        nvme_init_request(req, cmd);

        if (timeout)
                req->timeout = timeout;
        nvme_req(req)->flags |= NVME_REQ_USERCMD;

        if (ubuffer && bufflen) {
                if (!vec)
                        ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
                                        GFP_KERNEL);
                else {
                        struct iovec fast_iov[UIO_FASTIOV];
                        struct iovec *iov = fast_iov;
                        struct iov_iter iter;

                        ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
                                        UIO_FASTIOV, &iov, &iter);
                        if (ret < 0)
                                goto out;
                        ret = blk_rq_map_user_iov(q, req, NULL, &iter,
                                        GFP_KERNEL);
                        kfree(iov);
                }
                if (ret)
                        goto out;
                bio = req->bio;
                if (bdev)
                        bio_set_dev(bio, bdev);
                if (bdev && meta_buffer && meta_len) {
                        meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
                                        meta_seed, write);
                        if (IS_ERR(meta)) {
                                ret = PTR_ERR(meta);
                                goto out_unmap;
                        }
                        req->cmd_flags |= REQ_INTEGRITY;
                        *metap = meta;
                }
        }

        return req;

out_unmap:
        if (bio)
                blk_rq_unmap_user(bio);
out:
        blk_mq_free_request(req);
        return ERR_PTR(ret);
}

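/*
 * Synchronous passthrough: allocate the request, execute it, report the
 * 64-bit completion result, copy back metadata, and unmap/free everything.
 */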
static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
        struct request *req;
        void *meta = NULL;
        struct bio *bio;
        int ret;

        req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
                        meta_len, meta_seed, &meta, timeout, vec, 0, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        bio = req->bio;

        ret = nvme_execute_passthru_rq(req);

        if (result)
                *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta)
                ret = nvme_finish_user_metadata(req, meta_buffer, meta,
                                meta_len, ret);
        if (bio)
                blk_rq_unmap_user(bio);
        blk_mq_free_request(req);
        return ret;
}

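/*
 * Handler for NVME_IOCTL_SUBMIT_IO: validate the legacy nvme_user_io layout
 * and issue a read, write or compare command on the namespace.
 */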
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
        void __user *metadata;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        if (io.flags)
                return -EINVAL;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                break;
        default:
                return -EINVAL;
        }

        length = (io.nblocks + 1) << ns->lba_shift;

        if ((io.control & NVME_RW_PRINFO_PRACT) &&
            ns->ms == sizeof(struct t10_pi_tuple)) {
                /*
                 * Protection information is stripped/inserted by the
                 * controller.
                 */
                if (nvme_to_user_ptr(io.metadata))
                        return -EINVAL;
                meta_len = 0;
                metadata = NULL;
        } else {
                meta_len = (io.nblocks + 1) * ns->ms;
                metadata = nvme_to_user_ptr(io.metadata);
        }

        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
                meta_len = 0;
        } else if (meta_len) {
                if ((io.metadata & 3) || !io.metadata)
                        return -EINVAL;
        }

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
        c.rw.reftag = cpu_to_le32(io.reftag);
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);

        return nvme_submit_user_cmd(ns->queue, &c,
                        nvme_to_user_ptr(io.addr), length,
                        metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
                        false);
}

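/*
 * Check that the nsid carried in a passthrough command matches the namespace
 * the command is being issued on; controller-wide commands (no @ns) pass.
 */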
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
                struct nvme_ns *ns, __u32 nsid)
{
        if (ns && nsid != ns->head->ns_id) {
                dev_err(ctrl->device,
                        "%s: nsid (%u) in cmd does not match nsid (%u) "
                        "of namespace\n",
                        current->comm, nsid, ns->head->ns_id);
                return false;
        }

        return true;
}

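/*
 * Handler for NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD: the legacy
 * passthrough interface with a 32-bit result field.
 */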
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        struct nvme_passthru_cmd __user *ucmd)
{
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        u64 result;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &result, timeout, false);

        if (status >= 0) {
                if (put_user(result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

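/*
 * Handler for the 64-bit-result passthrough ioctls (NVME_IOCTL_ADMIN64_CMD,
 * NVME_IOCTL_IO64_CMD and the vectored NVME_IOCTL_IO64_CMD_VEC variant).
 */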
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &cmd.result, timeout, vec);

        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

struct nvme_uring_data {
        __u64 metadata;
        __u64 addr;
        __u32 data_len;
        __u32 metadata_len;
        __u32 timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
        union {
                struct bio *bio;
                struct request *req;
        };
        void *meta;
        void __user *meta_buffer;
        u32 meta_len;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
                struct io_uring_cmd *ioucmd)
{
        return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

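/*
 * Completion work deferred to task context: copy back metadata, unmap the
 * user buffer, free the request, and post the CQE via io_uring_cmd_done().
 */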
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        struct request *req = pdu->req;
        struct bio *bio = req->bio;
        int status;
        u64 result;

        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                status = -EINTR;
        else
                status = nvme_req(req)->status;

        result = le64_to_cpu(nvme_req(req)->result.u64);

        if (pdu->meta)
                status = nvme_finish_user_metadata(req, pdu->meta_buffer,
                                pdu->meta, pdu->meta_len, status);
        if (bio)
                blk_rq_unmap_user(bio);
        blk_mq_free_request(req);

        io_uring_cmd_done(ioucmd, status, result);
}

static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
{
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        /* extract bio before reusing the same field for request */
        struct bio *bio = pdu->bio;

        pdu->req = req;
        req->bio = bio;
        /* this takes care of moving rest of completion-work to task context */
        io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
}

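/*
 * Async passthrough used by the io_uring command path: build the NVMe command
 * from the big SQE, allocate a nowait request when IO_URING_F_NONBLOCK is set,
 * stash completion state in the pdu, and kick the request with
 * blk_execute_rq_nowait().
 */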
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        const struct nvme_uring_cmd *cmd = ioucmd->cmd;
        struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
        struct nvme_uring_data d;
        struct nvme_command c;
        struct request *req;
        blk_opf_t rq_flags = 0;
        blk_mq_req_flags_t blk_flags = 0;
        void *meta = NULL;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        c.common.opcode = READ_ONCE(cmd->opcode);
        c.common.flags = READ_ONCE(cmd->flags);
        if (c.common.flags)
                return -EINVAL;

        c.common.command_id = 0;
        c.common.nsid = cpu_to_le32(cmd->nsid);
        if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
                return -EINVAL;

        c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
        c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
        c.common.metadata = 0;
        c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
        c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
        c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
        c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
        c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
        c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
        c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

        d.metadata = READ_ONCE(cmd->metadata);
        d.addr = READ_ONCE(cmd->addr);
        d.data_len = READ_ONCE(cmd->data_len);
        d.metadata_len = READ_ONCE(cmd->metadata_len);
        d.timeout_ms = READ_ONCE(cmd->timeout_ms);

        if (issue_flags & IO_URING_F_NONBLOCK) {
                rq_flags = REQ_NOWAIT;
                blk_flags = BLK_MQ_REQ_NOWAIT;
        }

        req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
                        d.data_len, nvme_to_user_ptr(d.metadata),
                        d.metadata_len, 0, &meta, d.timeout_ms ?
                        msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
                        blk_flags);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->end_io = nvme_uring_cmd_end_io;
        req->end_io_data = ioucmd;

        /* to free bio on completion, as req->bio will be null at that time */
        pdu->bio = req->bio;
        pdu->meta = meta;
        pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
        pdu->meta_len = d.metadata_len;

        blk_execute_rq_nowait(req, false);
        return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
        if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
                return true;
        if (is_sed_ioctl(cmd))
                return true;
        return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
                void __user *argp)
{
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, false);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
        __u8 opcode;
        __u8 flags;
        __u16 control;
        __u16 nblocks;
        __u16 rsvd;
        __u64 metadata;
        __u64 addr;
        __u64 slba;
        __u32 dsmgmt;
        __u32 reftag;
        __u16 apptag;
        __u16 appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp)
{
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
                return nvme_user_cmd(ns->ctrl, ns, argp);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
         * same size and at the same offset.
         */
#ifdef COMPAT_FOR_U64_ALIGNMENT
        case NVME_IOCTL_SUBMIT_IO32:
#endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp, false);
        case NVME_IOCTL_IO64_CMD_VEC:
                return nvme_user_cmd64(ns->ctrl, ns, argp, true);
        default:
                return -ENOTTY;
        }
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
        return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;

        return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

        return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
        /* IOPOLL not supported yet */
        if (issue_flags & IO_URING_F_IOPOLL)
                return -EOPNOTSUPP;

        /* NVMe passthrough requires big SQE/CQE support */
        if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
            (IO_URING_F_SQE128|IO_URING_F_CQE32))
                return -EOPNOTSUPP;
        return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
                unsigned int issue_flags)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;

        switch (ioucmd->cmd_op) {
        case NVME_URING_CMD_IO:
                ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
                break;
        case NVME_URING_CMD_IO_VEC:
                ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
                        struct nvme_ns, cdev);

        return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

#ifdef CONFIG_NVME_MULTIPATH
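/*
 * Controller-level ioctl on a multipath (ns_head) node: grab a controller
 * reference and drop the head->srcu read lock before executing the command,
 * then drop the controller reference again.
 */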
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx)
        __releases(&head->srcu)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        nvme_get_ctrl(ns->ctrl);
        srcu_read_unlock(&head->srcu, srcu_idx);
        ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

        nvme_put_ctrl(ctrl);
        return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        /*
         * Handle ioctls that apply to the controller instead of the namespace
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

        ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct cdev *cdev = file_inode(file)->i_cdev;
        struct nvme_ns_head *head =
                container_of(cdev, struct nvme_ns_head, cdev);
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

        ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
                unsigned int issue_flags)
{
        struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
        struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
        int srcu_idx = srcu_read_lock(&head->srcu);
        struct nvme_ns *ns = nvme_find_path(head);
        int ret = -EINVAL;

        if (ns)
                ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        struct nvme_ctrl *ctrl = ioucmd->file->private_data;
        int ret;

        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;

        switch (ioucmd->cmd_op) {
        case NVME_URING_CMD_ADMIN:
                ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
                break;
        case NVME_URING_CMD_ADMIN_VEC:
                ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

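/*
 * NVME_IOCTL_IO_CMD on the controller character device is only honoured when
 * the controller has exactly one namespace: look it up, take a reference, and
 * issue the command against it.
 */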
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
        struct nvme_ns *ns;
        int ret;

        down_read(&ctrl->namespaces_rwsem);
        if (list_empty(&ctrl->namespaces)) {
                ret = -ENOTTY;
                goto out_unlock;
        }

        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
                dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);

        ret = nvme_user_cmd(ctrl, ns, argp);
        nvme_put_ns(ns);
        return ret;

out_unlock:
        up_read(&ctrl->namespaces_rwsem);
        return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct nvme_ctrl *ctrl = file->private_data;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, false);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
                dev_warn(ctrl->device, "resetting controller\n");
                return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
                nvme_queue_scan(ctrl);
                return 0;
        default:
                return -ENOTTY;
        }
}