// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

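/*
 * Drop a park request taken with io_sq_thread_park() and release sqd->lock.
 * The SHOULD_PARK bit is only left set if other parkers are still pending.
 */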
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Clear SHOULD_PARK unconditionally, then re-set it if other parkers
	 * are still pending; a conditional clear_bit() would race with other
	 * threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

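/*
 * Park the SQPOLL thread: record the park request and acquire sqd->lock so
 * the caller can safely update SQPOLL state (e.g. the ctx list) until it
 * calls io_sq_thread_unpark().
 */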
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

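/*
 * Tell the SQPOLL thread to exit and wait until it has done so. May only be
 * called once per io_sq_data, and never from the thread itself.
 */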
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

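/*
 * Detach a ring from its SQPOLL backend: unlink it from the shared ctx list
 * while the thread is parked, recompute the idle timeout, and drop the
 * ctx's reference to the io_sq_data.
 */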
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

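/*
 * IORING_SETUP_ATTACH_WQ: share an existing SQPOLL backend. Look up the
 * target ring via its fd and take a reference on its io_sq_data, provided
 * the target has one and belongs to the same thread group.
 */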
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

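/*
 * Get the io_sq_data for a new SQPOLL ring: attach to an existing one if
 * IORING_SETUP_ATTACH_WQ was requested (falling back to a fresh allocation
 * only on -EPERM), otherwise allocate and initialise a new instance.
 */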
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for the EPERM case, set up a new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

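/*
 * Run one submission pass for a single ctx: complete any pending iopoll
 * work and submit new SQEs under uring_lock, using the ring's saved creds.
 * cap_entries limits the batch when multiple rings share this thread.
 */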
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if the ref is dying; good for io_uring_register(),
		 * and io_ring_exit_work() also relies on this check.
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

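/*
 * Handle a pending park or stop request: drop sqd->lock so parkers can make
 * progress, consume any pending signal, and report whether the thread
 * should exit.
 */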
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

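/*
 * Main loop of the SQPOLL kernel thread: repeatedly submit SQEs for every
 * attached ring, then, once the idle timeout expires with no work pending,
 * set IORING_SQ_NEED_WAKEUP and sleep until userspace wakes it.
 */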
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	if (sqd->sq_cpu != -1)
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	else
		set_cpus_allowed_ptr(current, cpu_online_mask);
	current->flags |= PF_NO_SETAFFINITY;

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			cond_resched();
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail.
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}

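/*
 * Wait for space in the SQ ring: sleep until the ring is no longer full or
 * a signal is pending.
 */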
int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
	return 0;
}

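/*
 * Set up the SQPOLL side of a new ring: validate the wq_fd for attach-only
 * setups, then for IORING_SETUP_SQPOLL acquire (or share) an io_sq_data,
 * apply CPU affinity and idle settings, and spawn the io_sq_thread worker.
 */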
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}