#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
    IOU_OK          = 0,
    IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

    /*
     * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
     * are set to indicate to the poll runner that multishot should be
     * removed and the result is set on req->cqe.res.
     */
    IOU_STOP_MULTISHOT  = -ECANCELED,
};

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
             bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
             bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                   unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
    return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
                struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
            bool cancel_all);

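/*
 * Walk a request and every request linked behind it via ->link.
 */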
#define io_for_each_link(pos, head) \
    for (pos = (head); pos; pos = pos->link)

static inline void io_cq_lock(struct io_ring_ctx *ctx)
    __acquires(ctx->completion_lock)
{
    spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

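/*
 * Fast path for grabbing the next free CQE slot: hand one out from the
 * cached range if any entries remain (CQE32 rings consume two slots per
 * entry), otherwise fall back to the uncached __io_get_cqe() path.
 */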
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
    if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
        struct io_uring_cqe *cqe = ctx->cqe_cached;

        ctx->cached_cq_tail++;
        ctx->cqe_cached++;
        if (ctx->flags & IORING_SETUP_CQE32)
            ctx->cqe_cached++;
        return cqe;
    }

    return __io_get_cqe(ctx);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
                     struct io_kiocb *req)
{
    struct io_uring_cqe *cqe;

    /*
     * If we can't get a cq entry, userspace overflowed the
     * submission (by quite a lot). Increment the overflow count in
     * the ring.
     */
    cqe = io_get_cqe(ctx);
    if (unlikely(!cqe))
        return io_req_cqe_overflow(req);

    trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
                req->cqe.res, req->cqe.flags,
                (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
                (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

    memcpy(cqe, &req->cqe, sizeof(*cqe));

    if (ctx->flags & IORING_SETUP_CQE32) {
        u64 extra1 = 0, extra2 = 0;

        if (req->flags & REQ_F_CQE32_INIT) {
            extra1 = req->extra1;
            extra2 = req->extra2;
        }

        WRITE_ONCE(cqe->big_cqe[0], extra1);
        WRITE_ONCE(cqe->big_cqe[1], extra2);
    }
    return true;
}

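/*
 * Mark a request as failed. A failed request always posts a CQE, so drop
 * REQ_F_CQE_SKIP if it was set and carry the skip over to the remainder
 * of the link via REQ_F_SKIP_LINK_CQES.
 */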
static inline void req_set_fail(struct io_kiocb *req)
{
    req->flags |= REQ_F_FAIL;
    if (req->flags & REQ_F_CQE_SKIP) {
        req->flags &= ~REQ_F_CQE_SKIP;
        req->flags |= REQ_F_SKIP_LINK_CQES;
    }
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
    req->cqe.res = res;
    req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
    return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
    if (file)
        fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                     unsigned issue_flags)
{
    lockdep_assert_held(&ctx->uring_lock);
    if (issue_flags & IO_URING_F_UNLOCKED)
        mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                       unsigned issue_flags)
{
    /*
     * "Normal" inline submissions always hold the uring_lock, since we
     * grab it from the system call. The same is true for the SQPOLL offload.
     * The only exception is when we've detached the request and issue it
     * from an async worker thread; grab the lock for that case.
     */
    if (issue_flags & IO_URING_F_UNLOCKED)
        mutex_lock(&ctx->uring_lock);
    lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
    /* order cqe stores with ring update */
    smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
    /*
     * wake_up_all() may seem excessive, but io_wake_function() and
     * io_should_wake() handle the termination of the loop and only
     * wake as many waiters as we need to.
     */
    if (wq_has_sleeper(&ctx->cq_wait))
        wake_up_all(&ctx->cq_wait);
}

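/*
 * The SQ ring is full when the number of entries userspace has queued but
 * the kernel has not yet consumed equals the ring size.
 */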
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
    struct io_rings *r = ctx->rings;

    return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
    struct io_rings *rings = ctx->rings;

    /* make sure SQ entry isn't read before tail */
    return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

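/*
 * If a task_work notification is pending (TIF_NOTIFY_SIGNAL), clear it and
 * run any queued task_work for the current task. Returns true if the
 * notification was handled.
 */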
static inline bool io_run_task_work(void)
{
    if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
        __set_current_state(TASK_RUNNING);
        clear_notify_signal();
        if (task_work_pending(current))
            task_work_run();
        return true;
    }

    return false;
}

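/*
 * Task-work helper: take ->uring_lock if this task_work run doesn't hold it
 * yet, and record that in *locked so the lock is taken at most once and
 * released by the caller when the run ends.
 */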
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
    if (!*locked) {
        mutex_lock(&ctx->uring_lock);
        *locked = true;
    }
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
    __must_hold(&req->ctx->uring_lock)
{
    struct io_submit_state *state = &req->ctx->submit_state;

    lockdep_assert_held(&req->ctx->uring_lock);

    wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
    if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
        __io_commit_cqring_flush(ctx);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
    if (likely(task == current))
        task->io_uring->cached_refs += nr;
    else
        __io_put_task(task, nr);
}

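/*
 * Take nr task references from the current task's cached batch, refilling
 * the cache via io_task_refs_refill() when it runs dry.
 */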
static inline void io_get_task_refs(int nr)
{
    struct io_uring_task *tctx = current->io_uring;

    tctx->cached_refs -= nr;
    if (unlikely(tctx->cached_refs < 0))
        io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
    return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
    if (unlikely(io_req_cache_empty(ctx)))
        return __io_alloc_req_refill(ctx);
    return true;
}

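/*
 * Pop a request from the submit-state free list. Callers must ensure the
 * cache is non-empty first (see io_alloc_req_refill()); there is no NULL
 * check here.
 */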
static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
    struct io_wq_work_node *node;

    node = wq_stack_extract(&ctx->submit_state.free_list);
    return container_of(node, struct io_kiocb, comp_list);
}

#endif