// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

#define IO_WQE_F_DOUBLE		1

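/*
 * The wait queue entry's ->private pointer stores the owning io_kiocb, with
 * the low IO_WQE_F_DOUBLE bit set when the entry belongs to the second
 * (double) poll entry. The helpers below decode that encoding.
 */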
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

/*
 * The low IO_POLL_REF_MASK bits of ->poll_refs count references to the poll
 * machinery. A request is only owned by whoever bumps that count from zero,
 * and only the owner may queue completion task_work or tear the poll down;
 * everyone else must back off until the owner drops its references.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes the second entry in ->async_data, apoll keeps its own */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

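/*
 * Armed poll requests are tracked in one of two cancellation hash tables:
 * ->cancel_table uses per-bucket spinlocks, while ->cancel_table_locked is
 * protected by ->uring_lock (see REQ_F_HASH_LOCKED).
 */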
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to the per-bucket spinlocks. Likely, the task_work
		 * handler already grabbed the mutex for us, but there is a
		 * chance it did not, so take it here if needed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: all users of wake_up_pollfree()
	 * RCU-delay the actual free. If we enter rcu_read_lock() and still
	 * observe a non-NULL head via smp_load_acquire(), the waitqueue
	 * memory stays valid for the duration of the RCU read-side critical
	 * section, so it is safe to take its lock and unlink our entries.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
};

/*
 * All poll task_work goes through this: it checks for events, manages the
 * poll references and handles rearming.
 *
 * Returns a negative error on failure; IOU_POLL_NO_ACTION when there is
 * nothing to do (spurious wakeup, or a multishot CQE has already been
 * posted); IOU_POLL_DONE when the request should be completed with the mask
 * stored in req->cqe.res; IOU_POLL_REMOVE_POLL_USE_RES when the poll should
 * be removed and req->cqe.res already holds the final result.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* the task_work handler should be the owner, so it must hold references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return IOU_POLL_DONE;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

		/* the mask was stashed in __io_poll_execute; if it's empty, re-poll */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot poll posts an extra CQE; apoll-driven multishot re-issues the op */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/*
		 * Release all references; retry if someone else tried to
		 * queue task_work while we were processing the events.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return IOU_POLL_NO_ACTION;
}

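/*
 * Task work completion path for IORING_OP_POLL_ADD requests: translate the
 * result of io_poll_check_events() into a CQE result, drop the poll entries
 * and the cancellation hash entry, then complete the request.
 */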
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	if (ret == IOU_POLL_DONE) {
		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

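/*
 * Task work path for internal async polls armed on behalf of another
 * request: on readiness the original request is re-submitted rather than
 * completed with a poll mask.
 */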
static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
		io_req_complete_post(req);
	else if (ret == IOU_POLL_DONE)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);

	/*
	 * Stash the result mask and punt the rest to task_work. The handler
	 * differs depending on whether this is a plain poll request or an
	 * internal async poll armed on behalf of another request.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick task_work, which will complete the cancelled request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick task_work in case it is not already running */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over the request, tear it down as best we can:
	 * immediately remove our entry from the waitqueue and prevent any
	 * further access to the waitqueue through this request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon as poll->head
	 * is NULL'ed out, the request can be completed and freed;
	 * io_poll_remove_entry() will then no longer need to take the
	 * waitqueue lock. Pairs with the smp_load_acquire() of poll->head.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it, check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in the tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

static void io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so can race with
	 * io_poll_wake() over req->flags. There is only one poll entry
	 * queued so far, so serialise with it by taking its head lock.
	 */
	if (head)
		spin_lock_irq(&head->lock);

	req->flags |= REQ_F_DOUBLE_POLL;
	if (req->opcode == IORING_OP_POLL_ADD)
		req->flags |= REQ_F_ASYNC_DATA;

	if (head)
		spin_unlock_irq(&head->lock);
	rcu_read_unlock();
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled may use multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll entry
	 * if that happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a second entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as a double wait queue entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		io_poll_double_prepare(req);
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

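/*
 * The arming side may only complete the request inline if it already owns it
 * (pt->owning, set when arming from io-wq) or if it can grab ownership now
 * without racing against queued task_work.
 */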
static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

/*
 * Returns 0 when the request has been handed over for polling. The caller
 * owns the request if this returns non-zero, but otherwise should not touch
 * it. Negative values contain an error code. When the result is >0, the
 * polling has completed inline and ipt->result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;

	/*
	 * Polling is either completed here or via task_work, so if we're in
	 * the task context we're naturally serialised with task_work by
	 * running on the same task. When arming from io-wq
	 * (IO_URING_F_UNLOCKED), take ownership up front by initialising the
	 * reference count to 1 so that a concurrent wakeup cannot complete
	 * the request from under us before arming finishes.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt))
			return 0;
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Release ownership. If someone tried to queue task_work
		 * while it was held, kick it off for them.
		 */
		v = atomic_dec_return(&req->poll_refs);
		if (unlikely(v & IO_POLL_REF_MASK))
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

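/*
 * Get an async poll container: reuse the one from a previous poll attempt,
 * take one from the per-ring cache when the ring lock is held, or fall back
 * to an atomic allocation.
 */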
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
		apoll = container_of(entry, struct async_poll, cache);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the task_work
	 * handler, so removal from the mutex-backed hash is free; use it by
	 * default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

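/*
 * Walk every bucket of a cancellation hash table and cancel all poll
 * requests matching the given task (or all requests if cancel_all is set).
 */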
static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

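/*
 * Look up an armed poll request by user_data. On success the matching bucket
 * is returned locked via *out_bucket and must be unlocked by the caller; on
 * failure the bucket lock has already been dropped.
 */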
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

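/*
 * As io_poll_find(), but matching on the request's file (or any request when
 * IORING_ASYNC_CANCEL_ANY is set), which requires scanning every bucket.
 */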
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

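/*
 * Take ownership of a found request and detach it from its waitqueues and
 * the cancellation hash. Callers in this file invoke it with the bucket lock
 * still held from io_poll_find(); returns -EALREADY if someone else already
 * owns the request.
 */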
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without an update to apply it to */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in the task_work handlers anyway, so
	 * use the mutex-backed cancellation hash.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only mask the event flags, keep the behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete the poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete the update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}