
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

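/*
 * Per-request state for IORING_OP_ASYNC_CANCEL, decoded from the SQE in
 * io_async_cancel_prep(): the user_data to match (addr), the cancel flags,
 * and the target fd when IORING_ASYNC_CANCEL_FD is set.
 */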
struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)

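/*
 * Match callback passed to io_wq_cancel_cb(): decides whether a queued
 * io-wq work item matches the criteria in io_cancel_data (same ring, plus
 * any request / same file / same user_data, depending on the flags). For
 * ALL/ANY style cancelations, the per-request cancel_seq prevents matching
 * the same request more than once within a single cancel attempt.
 */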
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}

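/*
 * Try to cancel matching work on a single task's io-wq, translating the
 * io-wq result into 0 (canceled), -EALREADY (already running) or -ENOENT
 * (no matching work found).
 */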
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

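/*
 * Core single-attempt cancelation: try the given task's io-wq first, then
 * armed poll requests, and finally pending timeouts (unless canceling by
 * file descriptor).
 */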
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

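/*
 * Decode and validate the IORING_OP_ASYNC_CANCEL SQE: only the flags in
 * CANCEL_FLAGS are accepted, and IORING_ASYNC_CANCEL_FD is mutually
 * exclusive with IORING_ASYNC_CANCEL_ANY.
 */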
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}

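/*
 * Run the cancelation: repeatedly try the issuing task's context while
 * matches keep turning up, then fall back to walking every task attached
 * to the ring and poking its io-wq. For ALL/ANY cancelations the return
 * value is the number of requests canceled, otherwise the last result.
 */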
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

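/*
 * Issue handler for IORING_OP_ASYNC_CANCEL. When canceling by file
 * descriptor, resolve either a fixed or a normal file first, then hand
 * off to __io_async_cancel() and post the result as this request's CQE.
 */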
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
						      issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

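/*
 * Initialize every bucket (spinlock + hlist head) of an io_hash_table,
 * the hashed lookup structure used when canceling hashed requests.
 */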
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

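/*
 * One synchronous cancel attempt. Fixed files must be re-resolved on every
 * attempt because the uring_lock is dropped between retries in
 * io_sync_cancel().
 */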
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		unsigned long file_ptr;

		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
		cd->file = (struct file *) (file_ptr & FFS_MASK);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

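/*
 * IORING_REGISTER_SYNC_CANCEL: cancel matching requests synchronously,
 * sleeping on cq_wait and retrying while matches are still running
 * (-EALREADY), until nothing is left, the caller is interrupted, or the
 * optional timeout expires.
 */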
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct fd f = { };
	DEFINE_WAIT(wait);
	int ret;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
		return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		f = fdget(sc.fd);
		if (!f.file)
			return -EBADF;
		cd.file = f.file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time a
	 * request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		if (ret != -EALREADY)
			break;

		mutex_unlock(&ctx->uring_lock);
		ret = io_run_task_work_sig();
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			break;
		}
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		mutex_lock(&ctx->uring_lock);
		if (!ret) {
			ret = -ETIME;
			break;
		}
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	fdput(f);
	return ret;
}
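
For context, here is a minimal userspace sketch of how the IORING_OP_ASYNC_CANCEL path above can be exercised. It is an illustrative example, not part of this kernel file, and it assumes liburing 2.2 or newer for io_uring_prep_cancel64(); the SQE could equally be filled in by hand.

/* cancel_example.c - build with: gcc cancel_example.c -luring */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 60 };
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* queue a long timeout that we will then cancel by user_data */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	sqe->user_data = 0x1234;

	/* queue the cancelation request matching user_data 0x1234 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_cancel64(sqe, 0x1234, 0);
	sqe->user_data = 0x5678;

	io_uring_submit(&ring);

	/*
	 * Reap both completions: the canceled timeout typically completes
	 * with -ECANCELED, and the cancel request itself with 0 on success
	 * (or -ENOENT / -EALREADY, as returned by io_try_cancel() above).
	 */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}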