/*
 *  An async IO implementation for Linux
 *  Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *  Implements an efficient asynchronous io interface.
 *
 *  Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *  Copyright 2018 Christoph Hellwig.
 *
 *  See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY       0

#define AIO_RING_MAGIC          0xa10a10a1
#define AIO_RING_COMPAT_FEATURES    1
#define AIO_RING_INCOMPAT_FEATURES  0
struct aio_ring {
    unsigned    id; /* kernel internal index number */
    unsigned    nr; /* number of io_events */
    unsigned    head;   /* Written to by userland or under ring_lock
                 * mutex by aio_read_events_ring(). */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */


    struct io_event     io_events[];
}; /* 128 bytes + ring size */
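
/*
 * This ring is mapped into userspace (see aio_setup_ring()), so events
 * can be reaped without a syscall.  A rough sketch of that userspace
 * fast path -- assuming the aio_context_t returned by io_setup() is
 * cast to a struct aio_ring *, and omitting the incompat_features
 * check and the memory barriers a real reaper (e.g. libaio) needs:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	unsigned head = ring->head, tail = ring->tail;
 *	while (head != tail) {
 *		struct io_event ev = ring->io_events[head];
 *		head = (head + 1) % ring->nr;
 *		... consume ev ...
 *	}
 *	ring->head = head;
 */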

/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD  2

#define AIO_RING_PAGES  8

struct kioctx_table {
    struct rcu_head     rcu;
    unsigned        nr;
    struct kioctx __rcu *table[];
};

struct kioctx_cpu {
    unsigned        reqs_available;
};

struct ctx_rq_wait {
    struct completion comp;
    atomic_t count;
};

struct kioctx {
    struct percpu_ref   users;
    atomic_t        dead;

    struct percpu_ref   reqs;

    unsigned long       user_id;

    struct __percpu kioctx_cpu *cpu;

    /*
     * For percpu reqs_available, number of slots we move to/from global
     * counter at a time:
     */
    unsigned        req_batch;
    /*
     * This is what userspace passed to io_setup(), it's not used for
     * anything but counting against the global max_reqs quota.
     *
     * The real limit is nr_events - 1, which will be larger (see
     * aio_setup_ring())
     */
    unsigned        max_reqs;

    /* Size of ringbuffer, in units of struct io_event */
    unsigned        nr_events;

    unsigned long       mmap_base;
    unsigned long       mmap_size;

    struct page     **ring_pages;
    long            nr_pages;

    struct rcu_work     free_rwork; /* see free_ioctx() */

    /*
     * signals when all in-flight requests are done
     */
    struct ctx_rq_wait  *rq_wait;

    struct {
        /*
         * This counts the number of available slots in the ringbuffer,
         * so we avoid overflowing it: it's decremented (if positive)
         * when allocating a kiocb and incremented when the resulting
         * io_event is pulled off the ringbuffer.
         *
         * We batch accesses to it with a percpu version.
         */
        atomic_t    reqs_available;
    } ____cacheline_aligned_in_smp;

    struct {
        spinlock_t  ctx_lock;
        struct list_head active_reqs;   /* used for cancellation */
    } ____cacheline_aligned_in_smp;

    struct {
        struct mutex    ring_lock;
        wait_queue_head_t wait;
    } ____cacheline_aligned_in_smp;

    struct {
        unsigned    tail;
        unsigned    completed_events;
        spinlock_t  completion_lock;
    } ____cacheline_aligned_in_smp;

    struct page     *internal_pages[AIO_RING_PAGES];
    struct file     *aio_ring_file;

    unsigned        id;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct fsync_iocb {
    struct file     *file;
    struct work_struct  work;
    bool            datasync;
    struct cred     *creds;
};

struct poll_iocb {
    struct file     *file;
    struct wait_queue_head  *head;
    __poll_t        events;
    bool            cancelled;
    bool            work_scheduled;
    bool            work_need_resched;
    struct wait_queue_entry wait;
    struct work_struct  work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct aio_kiocb {
    union {
        struct file     *ki_filp;
        struct kiocb        rw;
        struct fsync_iocb   fsync;
        struct poll_iocb    poll;
    };

    struct kioctx       *ki_ctx;
    kiocb_cancel_fn     *ki_cancel;

    struct io_event     ki_res;

    struct list_head    ki_list;    /* the aio core uses this
                         * for cancellation */
    refcount_t      ki_refcnt;

    /*
     * If the aio_resfd field of the userspace iocb is not zero,
     * this is the underlying eventfd context to deliver events to.
     */
    struct eventfd_ctx  *ki_eventfd;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;        /* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
static struct ctl_table aio_sysctls[] = {
    {
        .procname   = "aio-nr",
        .data       = &aio_nr,
        .maxlen     = sizeof(aio_nr),
        .mode       = 0444,
        .proc_handler   = proc_doulongvec_minmax,
    },
    {
        .procname   = "aio-max-nr",
        .data       = &aio_max_nr,
        .maxlen     = sizeof(aio_max_nr),
        .mode       = 0644,
        .proc_handler   = proc_doulongvec_minmax,
    },
    {}
};

static void __init aio_sysctl_init(void)
{
    register_sysctl_init("fs", aio_sysctls);
}
#else
#define aio_sysctl_init() do { } while (0)
#endif
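
/*
 * With CONFIG_SYSCTL these knobs appear under /proc/sys/fs: aio-nr is
 * read-only (mode 0444) and reports the current system-wide request
 * count, while aio-max-nr (mode 0644) can be raised by an
 * administrator, for example:
 *
 *	# cat /proc/sys/fs/aio-nr
 *	# sysctl -w fs.aio-max-nr=1048576
 *
 * Once the system-wide total would exceed aio-max-nr, io_setup()
 * fails with -EAGAIN (see ioctx_alloc() below).
 */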

static struct kmem_cache    *kiocb_cachep;
static struct kmem_cache    *kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
    struct file *file;
    struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
    if (IS_ERR(inode))
        return ERR_CAST(inode);

    inode->i_mapping->a_ops = &aio_ctx_aops;
    inode->i_mapping->private_data = ctx;
    inode->i_size = PAGE_SIZE * nr_pages;

    file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
                O_RDWR, &aio_ring_fops);
    if (IS_ERR(file))
        iput(inode);
    return file;
}

static int aio_init_fs_context(struct fs_context *fc)
{
    if (!init_pseudo(fc, AIO_RING_MAGIC))
        return -ENOMEM;
    fc->s_iflags |= SB_I_NOEXEC;
    return 0;
}

/* aio_setup
 *  Creates the slab caches used by the aio routines, panic on
 *  failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
    static struct file_system_type aio_fs = {
        .name       = "aio",
        .init_fs_context = aio_init_fs_context,
        .kill_sb    = kill_anon_super,
    };
    aio_mnt = kern_mount(&aio_fs);
    if (IS_ERR(aio_mnt))
        panic("Failed to create aio fs mount.");

    kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
    kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
    aio_sysctl_init();
    return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
    struct file *aio_ring_file = ctx->aio_ring_file;
    struct address_space *i_mapping;

    if (aio_ring_file) {
        truncate_setsize(file_inode(aio_ring_file), 0);

        /* Prevent further access to the kioctx from migratepages */
        i_mapping = aio_ring_file->f_mapping;
        spin_lock(&i_mapping->private_lock);
        i_mapping->private_data = NULL;
        ctx->aio_ring_file = NULL;
        spin_unlock(&i_mapping->private_lock);

        fput(aio_ring_file);
    }
}

static void aio_free_ring(struct kioctx *ctx)
{
    int i;

    /* Disconnect the kioctx from the ring file.  This prevents future
     * accesses to the kioctx from page migration.
     */
    put_aio_ring_file(ctx);

    for (i = 0; i < ctx->nr_pages; i++) {
        struct page *page;
        pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
                page_count(ctx->ring_pages[i]));
        page = ctx->ring_pages[i];
        if (!page)
            continue;
        ctx->ring_pages[i] = NULL;
        put_page(page);
    }

    if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
        kfree(ctx->ring_pages);
        ctx->ring_pages = NULL;
    }
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
    struct file *file = vma->vm_file;
    struct mm_struct *mm = vma->vm_mm;
    struct kioctx_table *table;
    int i, res = -EINVAL;

    spin_lock(&mm->ioctx_lock);
    rcu_read_lock();
    table = rcu_dereference(mm->ioctx_table);
    for (i = 0; i < table->nr; i++) {
        struct kioctx *ctx;

        ctx = rcu_dereference(table->table[i]);
        if (ctx && ctx->aio_ring_file == file) {
            if (!atomic_read(&ctx->dead)) {
                ctx->user_id = ctx->mmap_base = vma->vm_start;
                res = 0;
            }
            break;
        }
    }

    rcu_read_unlock();
    spin_unlock(&mm->ioctx_lock);
    return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
    .mremap     = aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
    .fault      = filemap_fault,
    .map_pages  = filemap_map_pages,
    .page_mkwrite   = filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
    vma->vm_flags |= VM_DONTEXPAND;
    vma->vm_ops = &aio_ring_vm_ops;
    return 0;
}

static const struct file_operations aio_ring_fops = {
    .mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
            struct folio *src, enum migrate_mode mode)
{
    struct kioctx *ctx;
    unsigned long flags;
    pgoff_t idx;
    int rc;

    /*
     * We cannot support the _NO_COPY case here, because copy needs to
     * happen under the ctx->completion_lock. That does not work with the
     * migration workflow of MIGRATE_SYNC_NO_COPY.
     */
    if (mode == MIGRATE_SYNC_NO_COPY)
        return -EINVAL;

    rc = 0;

    /* mapping->private_lock here protects against the kioctx teardown.  */
    spin_lock(&mapping->private_lock);
    ctx = mapping->private_data;
    if (!ctx) {
        rc = -EINVAL;
        goto out;
    }

    /* The ring_lock mutex.  This prevents aio_read_events() from writing
     * to the ring's head, and prevents page migration from mucking in
     * a partially initialized kioctx.
     */
    if (!mutex_trylock(&ctx->ring_lock)) {
        rc = -EAGAIN;
        goto out;
    }

    idx = src->index;
    if (idx < (pgoff_t)ctx->nr_pages) {
        /* Make sure the old folio hasn't already been changed */
        if (ctx->ring_pages[idx] != &src->page)
            rc = -EAGAIN;
    } else
        rc = -EINVAL;

    if (rc != 0)
        goto out_unlock;

    /* Writeback must be complete */
    BUG_ON(folio_test_writeback(src));
    folio_get(dst);

    rc = folio_migrate_mapping(mapping, dst, src, 1);
    if (rc != MIGRATEPAGE_SUCCESS) {
        folio_put(dst);
        goto out_unlock;
    }

    /* Take completion_lock to prevent other writes to the ring buffer
     * while the old folio is copied to the new.  This prevents new
     * events from being lost.
     */
    spin_lock_irqsave(&ctx->completion_lock, flags);
    folio_migrate_copy(dst, src);
    BUG_ON(ctx->ring_pages[idx] != &src->page);
    ctx->ring_pages[idx] = &dst->page;
    spin_unlock_irqrestore(&ctx->completion_lock, flags);

    /* The old folio is no longer accessible. */
    folio_put(src);

out_unlock:
    mutex_unlock(&ctx->ring_lock);
out:
    spin_unlock(&mapping->private_lock);
    return rc;
}
#else
#define aio_migrate_folio NULL
#endif

static const struct address_space_operations aio_ctx_aops = {
    .dirty_folio    = noop_dirty_folio,
    .migrate_folio  = aio_migrate_folio,
};

static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
    struct aio_ring *ring;
    struct mm_struct *mm = current->mm;
    unsigned long size, unused;
    int nr_pages;
    int i;
    struct file *file;

    /* Compensate for the ring buffer's head/tail overlap entry */
    nr_events += 2; /* 1 is required, 2 for good luck */

    size = sizeof(struct aio_ring);
    size += sizeof(struct io_event) * nr_events;

    nr_pages = PFN_UP(size);
    if (nr_pages < 0)
        return -EINVAL;

    file = aio_private_file(ctx, nr_pages);
    if (IS_ERR(file)) {
        ctx->aio_ring_file = NULL;
        return -ENOMEM;
    }

    ctx->aio_ring_file = file;
    nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
            / sizeof(struct io_event);

    ctx->ring_pages = ctx->internal_pages;
    if (nr_pages > AIO_RING_PAGES) {
        ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
                      GFP_KERNEL);
        if (!ctx->ring_pages) {
            put_aio_ring_file(ctx);
            return -ENOMEM;
        }
    }

    for (i = 0; i < nr_pages; i++) {
        struct page *page;
        page = find_or_create_page(file->f_mapping,
                       i, GFP_HIGHUSER | __GFP_ZERO);
        if (!page)
            break;
        pr_debug("pid(%d) page[%d]->count=%d\n",
             current->pid, i, page_count(page));
        SetPageUptodate(page);
        unlock_page(page);

        ctx->ring_pages[i] = page;
    }
    ctx->nr_pages = i;

    if (unlikely(i != nr_pages)) {
        aio_free_ring(ctx);
        return -ENOMEM;
    }

    ctx->mmap_size = nr_pages * PAGE_SIZE;
    pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

    if (mmap_write_lock_killable(mm)) {
        ctx->mmap_size = 0;
        aio_free_ring(ctx);
        return -EINTR;
    }

    ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
                 PROT_READ | PROT_WRITE,
                 MAP_SHARED, 0, &unused, NULL);
    mmap_write_unlock(mm);
    if (IS_ERR((void *)ctx->mmap_base)) {
        ctx->mmap_size = 0;
        aio_free_ring(ctx);
        return -ENOMEM;
    }

    pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

    ctx->user_id = ctx->mmap_base;
    ctx->nr_events = nr_events; /* trusted copy */

    ring = kmap_atomic(ctx->ring_pages[0]);
    ring->nr = nr_events;   /* user copy */
    ring->id = ~0U;
    ring->head = ring->tail = 0;
    ring->magic = AIO_RING_MAGIC;
    ring->compat_features = AIO_RING_COMPAT_FEATURES;
    ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
    ring->header_length = sizeof(struct aio_ring);
    kunmap_atomic(ring);
    flush_dcache_page(ctx->ring_pages[0]);

    return 0;
}

#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET   (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
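
/*
 * A worked example of the macros above, assuming a 4 KiB page size
 * (sizeof(struct io_event) is 32, and the aio_ring header occupies the
 * first 32 bytes of page 0):
 *
 *	AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
 *	AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *	AIO_EVENTS_OFFSET     = 128 - 127        = 1
 *
 * Event index i thus lives at pos = i + AIO_EVENTS_OFFSET, i.e. in
 * page pos / AIO_EVENTS_PER_PAGE at slot pos % AIO_EVENTS_PER_PAGE;
 * the offset simply skips the header in page 0 (see aio_complete()
 * and aio_read_events_ring() for the two users of this math).
 */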

void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
    struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
    struct kioctx *ctx = req->ki_ctx;
    unsigned long flags;

    if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
        return;

    spin_lock_irqsave(&ctx->ctx_lock, flags);
    list_add_tail(&req->ki_list, &ctx->active_reqs);
    req->ki_cancel = cancel;
    spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
    struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
                      free_rwork);
    pr_debug("freeing %p\n", ctx);

    aio_free_ring(ctx);
    free_percpu(ctx->cpu);
    percpu_ref_exit(&ctx->reqs);
    percpu_ref_exit(&ctx->users);
    kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
    struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

    /* At this point we know that there are no in-flight requests */
    if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
        complete(&ctx->rq_wait->comp);

    /* Synchronize against RCU protected table->table[] dereferences */
    INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
    queue_rcu_work(system_wq, &ctx->free_rwork);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
    struct kioctx *ctx = container_of(ref, struct kioctx, users);
    struct aio_kiocb *req;

    spin_lock_irq(&ctx->ctx_lock);

    while (!list_empty(&ctx->active_reqs)) {
        req = list_first_entry(&ctx->active_reqs,
                       struct aio_kiocb, ki_list);
        req->ki_cancel(&req->rw);
        list_del_init(&req->ki_list);
    }

    spin_unlock_irq(&ctx->ctx_lock);

    percpu_ref_kill(&ctx->reqs);
    percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
    unsigned i, new_nr;
    struct kioctx_table *table, *old;
    struct aio_ring *ring;

    spin_lock(&mm->ioctx_lock);
    table = rcu_dereference_raw(mm->ioctx_table);

    while (1) {
        if (table)
            for (i = 0; i < table->nr; i++)
                if (!rcu_access_pointer(table->table[i])) {
                    ctx->id = i;
                    rcu_assign_pointer(table->table[i], ctx);
                    spin_unlock(&mm->ioctx_lock);

                    /* While kioctx setup is in progress,
                     * we are protected from page migration
                     * changing ring_pages by ->ring_lock.
                     */
                    ring = kmap_atomic(ctx->ring_pages[0]);
                    ring->id = ctx->id;
                    kunmap_atomic(ring);
                    return 0;
                }

        new_nr = (table ? table->nr : 1) * 4;
        spin_unlock(&mm->ioctx_lock);

        table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
        if (!table)
            return -ENOMEM;

        table->nr = new_nr;

        spin_lock(&mm->ioctx_lock);
        old = rcu_dereference_raw(mm->ioctx_table);

        if (!old) {
            rcu_assign_pointer(mm->ioctx_table, table);
        } else if (table->nr > old->nr) {
            memcpy(table->table, old->table,
                   old->nr * sizeof(struct kioctx *));

            rcu_assign_pointer(mm->ioctx_table, table);
            kfree_rcu(old, rcu);
        } else {
            kfree(table);
            table = old;
        }
    }
}

static void aio_nr_sub(unsigned nr)
{
    spin_lock(&aio_nr_lock);
    if (WARN_ON(aio_nr - nr > aio_nr))
        aio_nr = 0;
    else
        aio_nr -= nr;
    spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *  Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
    struct mm_struct *mm = current->mm;
    struct kioctx *ctx;
    int err = -ENOMEM;

    /*
     * Store the original nr_events -- what userspace passed to io_setup(),
     * for counting against the global limit -- before it changes.
     */
    unsigned int max_reqs = nr_events;

    /*
     * We keep track of the number of available ringbuffer slots, to prevent
     * overflow (reqs_available), and we also use percpu counters for this.
     *
     * So since up to half the slots might be on other cpu's percpu counters
     * and unavailable, double nr_events so userspace sees what they
     * expected: additionally, we move req_batch slots to/from percpu
     * counters at a time, so make sure that isn't 0:
     */
    nr_events = max(nr_events, num_possible_cpus() * 4);
    nr_events *= 2;

    /* Prevent overflows */
    if (nr_events > (0x10000000U / sizeof(struct io_event))) {
        pr_debug("ENOMEM: nr_events too high\n");
        return ERR_PTR(-EINVAL);
    }

    if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
        return ERR_PTR(-EAGAIN);

    ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
    if (!ctx)
        return ERR_PTR(-ENOMEM);

    ctx->max_reqs = max_reqs;

    spin_lock_init(&ctx->ctx_lock);
    spin_lock_init(&ctx->completion_lock);
    mutex_init(&ctx->ring_lock);
    /* Protect against page migration throughout kioctx setup by keeping
     * the ring_lock mutex held until setup is complete. */
    mutex_lock(&ctx->ring_lock);
    init_waitqueue_head(&ctx->wait);

    INIT_LIST_HEAD(&ctx->active_reqs);

    if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
        goto err;

    if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
        goto err;

    ctx->cpu = alloc_percpu(struct kioctx_cpu);
    if (!ctx->cpu)
        goto err;

    err = aio_setup_ring(ctx, nr_events);
    if (err < 0)
        goto err;

    atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
    ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
    if (ctx->req_batch < 1)
        ctx->req_batch = 1;

    /* limit the number of system wide aios */
    spin_lock(&aio_nr_lock);
    if (aio_nr + ctx->max_reqs > aio_max_nr ||
        aio_nr + ctx->max_reqs < aio_nr) {
        spin_unlock(&aio_nr_lock);
        err = -EAGAIN;
        goto err_ctx;
    }
    aio_nr += ctx->max_reqs;
    spin_unlock(&aio_nr_lock);

    percpu_ref_get(&ctx->users);    /* io_setup() will drop this ref */
    percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */

    err = ioctx_add_table(ctx, mm);
    if (err)
        goto err_cleanup;

    /* Release the ring_lock mutex now that all setup is complete. */
    mutex_unlock(&ctx->ring_lock);

    pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
         ctx, ctx->user_id, mm, ctx->nr_events);
    return ctx;

err_cleanup:
    aio_nr_sub(ctx->max_reqs);
err_ctx:
    atomic_set(&ctx->dead, 1);
    if (ctx->mmap_size)
        vm_munmap(ctx->mmap_base, ctx->mmap_size);
    aio_free_ring(ctx);
err:
    mutex_unlock(&ctx->ring_lock);
    free_percpu(ctx->cpu);
    percpu_ref_exit(&ctx->reqs);
    percpu_ref_exit(&ctx->users);
    kmem_cache_free(kioctx_cachep, ctx);
    pr_debug("error allocating ioctx %d\n", err);
    return ERR_PTR(err);
}

/* kill_ioctx
 *  Cancels all outstanding aio requests on an aio context.  Used
 *  when the processes owning a context have all exited to encourage
 *  the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
              struct ctx_rq_wait *wait)
{
    struct kioctx_table *table;

    spin_lock(&mm->ioctx_lock);
    if (atomic_xchg(&ctx->dead, 1)) {
        spin_unlock(&mm->ioctx_lock);
        return -EINVAL;
    }

    table = rcu_dereference_raw(mm->ioctx_table);
    WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
    RCU_INIT_POINTER(table->table[ctx->id], NULL);
    spin_unlock(&mm->ioctx_lock);

    /* free_ioctx_reqs() will do the necessary RCU synchronization */
    wake_up_all(&ctx->wait);

    /*
     * It'd be more correct to do this in free_ioctx(), after all
     * the outstanding kiocbs have finished - but by then io_destroy
     * has already returned, so io_setup() could potentially return
     * -EAGAIN with no ioctxs actually in use (as far as userspace
     *  could tell).
     */
    aio_nr_sub(ctx->max_reqs);

    if (ctx->mmap_size)
        vm_munmap(ctx->mmap_base, ctx->mmap_size);

    ctx->rq_wait = wait;
    percpu_ref_kill(&ctx->users);
    return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
    struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
    struct ctx_rq_wait wait;
    int i, skipped;

    if (!table)
        return;

    atomic_set(&wait.count, table->nr);
    init_completion(&wait.comp);

    skipped = 0;
    for (i = 0; i < table->nr; ++i) {
        struct kioctx *ctx =
            rcu_dereference_protected(table->table[i], true);

        if (!ctx) {
            skipped++;
            continue;
        }

        /*
         * We don't need to bother with munmap() here - exit_mmap(mm)
         * is coming and it'll unmap everything. And we simply can't,
         * this is not necessarily our ->mm.
         * Since kill_ioctx() uses non-zero ->mmap_size as indicator
         * that it needs to unmap the area, just set it to 0.
         */
        ctx->mmap_size = 0;
        kill_ioctx(mm, ctx, &wait);
    }

    if (!atomic_sub_and_test(skipped, &wait.count)) {
        /* Wait until all IO for the context are done. */
        wait_for_completion(&wait.comp);
    }

    RCU_INIT_POINTER(mm->ioctx_table, NULL);
    kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
    struct kioctx_cpu *kcpu;
    unsigned long flags;

    local_irq_save(flags);
    kcpu = this_cpu_ptr(ctx->cpu);
    kcpu->reqs_available += nr;

    while (kcpu->reqs_available >= ctx->req_batch * 2) {
        kcpu->reqs_available -= ctx->req_batch;
        atomic_add(ctx->req_batch, &ctx->reqs_available);
    }

    local_irq_restore(flags);
}

static bool __get_reqs_available(struct kioctx *ctx)
{
    struct kioctx_cpu *kcpu;
    bool ret = false;
    unsigned long flags;

    local_irq_save(flags);
    kcpu = this_cpu_ptr(ctx->cpu);
    if (!kcpu->reqs_available) {
        int old, avail = atomic_read(&ctx->reqs_available);

        do {
            if (avail < ctx->req_batch)
                goto out;

            old = avail;
            avail = atomic_cmpxchg(&ctx->reqs_available,
                           avail, avail - ctx->req_batch);
        } while (avail != old);

        kcpu->reqs_available += ctx->req_batch;
    }

    ret = true;
    kcpu->reqs_available--;
out:
    local_irq_restore(flags);
    return ret;
}

/* refill_reqs_available
 *  Updates the reqs_available reference counts used for tracking the
 *  number of free slots in the completion ring.  This can be called
 *  from aio_complete() (to optimistically update reqs_available) or
 *  from aio_get_req() (the "we're out of events" case).  It must be
 *  called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
                                  unsigned tail)
{
    unsigned events_in_ring, completed;

    /* Clamp head since userland can write to it. */
    head %= ctx->nr_events;
    if (head <= tail)
        events_in_ring = tail - head;
    else
        events_in_ring = ctx->nr_events - (head - tail);

    completed = ctx->completed_events;
    if (events_in_ring < completed)
        completed -= events_in_ring;
    else
        completed = 0;

    if (!completed)
        return;

    ctx->completed_events -= completed;
    put_reqs_available(ctx, completed);
}

/* user_refill_reqs_available
 *  Called to refill reqs_available when aio_get_req() runs out of
 *  space in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
    spin_lock_irq(&ctx->completion_lock);
    if (ctx->completed_events) {
        struct aio_ring *ring;
        unsigned head;

        /* Access of ring->head may race with aio_read_events_ring()
         * here, but that's okay: whether we read the old version or
         * the new version, either will be valid.  The important
         * part is that head cannot pass tail since we prevent
         * aio_complete() from updating tail by holding
         * ctx->completion_lock.  Even if head is invalid, the check
         * against ctx->completed_events below will make sure we do the
         * safe/right thing.
         */
        ring = kmap_atomic(ctx->ring_pages[0]);
        head = ring->head;
        kunmap_atomic(ring);

        refill_reqs_available(ctx, head, ctx->tail);
    }

    spin_unlock_irq(&ctx->completion_lock);
}

static bool get_reqs_available(struct kioctx *ctx)
{
    if (__get_reqs_available(ctx))
        return true;
    user_refill_reqs_available(ctx);
    return __get_reqs_available(ctx);
}
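
/*
 * reqs_available is thus a two-level counter: each CPU caches up to
 * 2 * req_batch slots in its kioctx_cpu, and __get_reqs_available() /
 * put_reqs_available() move req_batch slots at a time between that
 * cache and the global atomic.  For example, with nr_events == 128 on
 * a machine with 4 possible CPUs, req_batch is (128 - 1) / (4 * 4) = 7:
 * a CPU pulls 7 slots from the global counter when its cache runs dry,
 * and gives 7 back once its cache reaches 14.
 */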

/* aio_get_req
 *  Allocate a slot for an aio request.
 *  Returns NULL if no requests are free.
 *
 * The refcount is initialized to 2 - one for the async op completion,
 * one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
    struct aio_kiocb *req;

    req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
    if (unlikely(!req))
        return NULL;

    if (unlikely(!get_reqs_available(ctx))) {
        kmem_cache_free(kiocb_cachep, req);
        return NULL;
    }

    percpu_ref_get(&ctx->reqs);
    req->ki_ctx = ctx;
    INIT_LIST_HEAD(&req->ki_list);
    refcount_set(&req->ki_refcnt, 2);
    req->ki_eventfd = NULL;
    return req;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
    struct aio_ring __user *ring = (void __user *)ctx_id;
    struct mm_struct *mm = current->mm;
    struct kioctx *ctx, *ret = NULL;
    struct kioctx_table *table;
    unsigned id;

    if (get_user(id, &ring->id))
        return NULL;

    rcu_read_lock();
    table = rcu_dereference(mm->ioctx_table);

    if (!table || id >= table->nr)
        goto out;

    id = array_index_nospec(id, table->nr);
    ctx = rcu_dereference(table->table[id]);
    if (ctx && ctx->user_id == ctx_id) {
        if (percpu_ref_tryget_live(&ctx->users))
            ret = ctx;
    }
out:
    rcu_read_unlock();
    return ret;
}

static inline void iocb_destroy(struct aio_kiocb *iocb)
{
    if (iocb->ki_eventfd)
        eventfd_ctx_put(iocb->ki_eventfd);
    if (iocb->ki_filp)
        fput(iocb->ki_filp);
    percpu_ref_put(&iocb->ki_ctx->reqs);
    kmem_cache_free(kiocb_cachep, iocb);
}

/* aio_complete
 *  Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb)
{
    struct kioctx   *ctx = iocb->ki_ctx;
    struct aio_ring *ring;
    struct io_event *ev_page, *event;
    unsigned tail, pos, head;
    unsigned long   flags;

    /*
     * Add a completion event to the ring buffer. Must be done holding
     * ctx->completion_lock to prevent other code from messing with the tail
     * pointer since we might be called from irq context.
     */
    spin_lock_irqsave(&ctx->completion_lock, flags);

    tail = ctx->tail;
    pos = tail + AIO_EVENTS_OFFSET;

    if (++tail >= ctx->nr_events)
        tail = 0;

    ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
    event = ev_page + pos % AIO_EVENTS_PER_PAGE;

    *event = iocb->ki_res;

    kunmap_atomic(ev_page);
    flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

    pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
         (void __user *)(unsigned long)iocb->ki_res.obj,
         iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

    /* after flagging the request as done, we
     * must never even look at it again
     */
    smp_wmb();  /* make event visible before updating tail */

    ctx->tail = tail;

    ring = kmap_atomic(ctx->ring_pages[0]);
    head = ring->head;
    ring->tail = tail;
    kunmap_atomic(ring);
    flush_dcache_page(ctx->ring_pages[0]);

    ctx->completed_events++;
    if (ctx->completed_events > 1)
        refill_reqs_available(ctx, head, tail);
    spin_unlock_irqrestore(&ctx->completion_lock, flags);

    pr_debug("added to ring %p at [%u]\n", iocb, tail);

    /*
     * Check if the user asked us to deliver the result through an
     * eventfd. The eventfd_signal() function is safe to be called
     * from IRQ context.
     */
    if (iocb->ki_eventfd)
        eventfd_signal(iocb->ki_eventfd, 1);

    /*
     * We have to order our ring_info tail store above and test
     * of the wait list below outside the wait lock.  This is
     * like in wake_up_bit() where clearing a bit has to be
     * ordered with the unlocked test.
     */
    smp_mb();

    if (waitqueue_active(&ctx->wait))
        wake_up(&ctx->wait);
}

static inline void iocb_put(struct aio_kiocb *iocb)
{
    if (refcount_dec_and_test(&iocb->ki_refcnt)) {
        aio_complete(iocb);
        iocb_destroy(iocb);
    }
}
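
/*
 * ki_refcnt starts at 2 (see aio_get_req()): the completion side
 * (e.g. aio_complete_rw() below) drops one reference and the
 * submission side drops the other, so whichever of the two runs last
 * is the one that publishes the io_event and frees the iocb.
 */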

/* aio_read_events_ring
 *  Pull an event off of the ioctx's event ring.  Returns the number of
 *  events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
                 struct io_event __user *event, long nr)
{
    struct aio_ring *ring;
    unsigned head, tail, pos;
    long ret = 0;
    int copy_ret;

    /*
     * The mutex can block and wake us up and that will cause
     * wait_event_interruptible_hrtimeout() to schedule without sleeping
     * and repeat. This should be rare enough that it doesn't cause
     * performance issues. See the comment in read_events() for more detail.
     */
    sched_annotate_sleep();
    mutex_lock(&ctx->ring_lock);

    /* Access to ->ring_pages here is protected by ctx->ring_lock. */
    ring = kmap_atomic(ctx->ring_pages[0]);
    head = ring->head;
    tail = ring->tail;
    kunmap_atomic(ring);

    /*
     * Ensure that once we've read the current tail pointer, that
     * we also see the events that were stored up to the tail.
     */
    smp_rmb();

    pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

    if (head == tail)
        goto out;

    head %= ctx->nr_events;
    tail %= ctx->nr_events;

    while (ret < nr) {
        long avail;
        struct io_event *ev;
        struct page *page;

        avail = (head <= tail ?  tail : ctx->nr_events) - head;
        if (head == tail)
            break;

        pos = head + AIO_EVENTS_OFFSET;
        page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
        pos %= AIO_EVENTS_PER_PAGE;

        avail = min(avail, nr - ret);
        avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

        ev = kmap(page);
        copy_ret = copy_to_user(event + ret, ev + pos,
                    sizeof(*ev) * avail);
        kunmap(page);

        if (unlikely(copy_ret)) {
            ret = -EFAULT;
            goto out;
        }

        ret += avail;
        head += avail;
        head %= ctx->nr_events;
    }

    ring = kmap_atomic(ctx->ring_pages[0]);
    ring->head = head;
    kunmap_atomic(ring);
    flush_dcache_page(ctx->ring_pages[0]);

    pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
    mutex_unlock(&ctx->ring_lock);

    return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
                struct io_event __user *event, long *i)
{
    long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

    if (ret > 0)
        *i += ret;

    if (unlikely(atomic_read(&ctx->dead)))
        ret = -EINVAL;

    if (!*i)
        *i = ret;

    return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
            struct io_event __user *event,
            ktime_t until)
{
    long ret = 0;

    /*
     * Note that aio_read_events() is being called as the conditional - i.e.
     * we're calling it after prepare_to_wait() has set task state to
     * TASK_INTERRUPTIBLE.
     *
     * But aio_read_events() can block, and if it blocks it's going to flip
     * the task state back to TASK_RUNNING.
     *
     * This should be ok, provided it doesn't flip the state back to
     * TASK_RUNNING and return 0 too much - that causes us to spin. That
     * will only happen if the mutex_lock() call blocks, and we then find
     * the ringbuffer empty. So in practice we should be ok, but it's
     * something to be aware of when touching this code.
     */
    if (until == 0)
        aio_read_events(ctx, min_nr, nr, event, &ret);
    else
        wait_event_interruptible_hrtimeout(ctx->wait,
                aio_read_events(ctx, min_nr, nr, event, &ret),
                until);
    return ret;
}

/* sys_io_setup:
 *  Create an aio_context capable of receiving at least nr_events.
 *  ctxp must not point to an aio_context that already exists, and
 *  must be initialized to 0 prior to the call.  On successful
 *  creation of the aio_context, *ctxp is filled in with the resulting
 *  handle.  May fail with -EINVAL if *ctxp is not initialized,
 *  if the specified nr_events exceeds internal limits.  May fail
 *  with -EAGAIN if the specified nr_events exceeds the user's limit
 *  of available events.  May fail with -ENOMEM if insufficient kernel
 *  resources are available.  May fail with -EFAULT if an invalid
 *  pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *  implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
    struct kioctx *ioctx = NULL;
    unsigned long ctx;
    long ret;

    ret = get_user(ctx, ctxp);
    if (unlikely(ret))
        goto out;

    ret = -EINVAL;
    if (unlikely(ctx || nr_events == 0)) {
        pr_debug("EINVAL: ctx %lu nr_events %u\n",
                 ctx, nr_events);
        goto out;
    }

    ioctx = ioctx_alloc(nr_events);
    ret = PTR_ERR(ioctx);
    if (!IS_ERR(ioctx)) {
        ret = put_user(ioctx->user_id, ctxp);
        if (ret)
            kill_ioctx(current->mm, ioctx, NULL);
        percpu_ref_put(&ioctx->users);
    }

out:
    return ret;
}
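
/*
 * A minimal userspace sketch of this syscall (using raw syscall(2)
 * rather than the libaio wrappers, which call the same interface):
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;	// must be zero-initialized, per above
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	// ... submit with io_submit(), reap with io_getevents() ...
 *	syscall(__NR_io_destroy, ctx);
 */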

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
    struct kioctx *ioctx = NULL;
    unsigned long ctx;
    long ret;

    ret = get_user(ctx, ctx32p);
    if (unlikely(ret))
        goto out;

    ret = -EINVAL;
    if (unlikely(ctx || nr_events == 0)) {
        pr_debug("EINVAL: ctx %lu nr_events %u\n",
                 ctx, nr_events);
        goto out;
    }

    ioctx = ioctx_alloc(nr_events);
    ret = PTR_ERR(ioctx);
    if (!IS_ERR(ioctx)) {
        /* truncating is ok because it's a user address */
        ret = put_user((u32)ioctx->user_id, ctx32p);
        if (ret)
            kill_ioctx(current->mm, ioctx, NULL);
        percpu_ref_put(&ioctx->users);
    }

out:
    return ret;
}
#endif

/* sys_io_destroy:
 *  Destroy the aio_context specified.  May cancel any outstanding
 *  AIOs and block on completion.  Will fail with -ENOSYS if not
 *  implemented.  May fail with -EINVAL if the context pointed to
 *  is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
    struct kioctx *ioctx = lookup_ioctx(ctx);
    if (likely(NULL != ioctx)) {
        struct ctx_rq_wait wait;
        int ret;

        init_completion(&wait.comp);
        atomic_set(&wait.count, 1);

        /* Pass requests_done to kill_ioctx() where it can be set
         * in a thread-safe way. If we try to set it here then we have
         * a race condition if two io_destroy() calls run simultaneously.
         */
        ret = kill_ioctx(current->mm, ioctx, &wait);
        percpu_ref_put(&ioctx->users);

        /* Wait until all IO for the context are done. Otherwise the kernel
         * keeps using user-space buffers even if the user thinks the context
         * is destroyed.
         */
        if (!ret)
            wait_for_completion(&wait.comp);

        return ret;
    }
    pr_debug("EINVAL: invalid context id\n");
    return -EINVAL;
}

static void aio_remove_iocb(struct aio_kiocb *iocb)
{
    struct kioctx *ctx = iocb->ki_ctx;
    unsigned long flags;

    spin_lock_irqsave(&ctx->ctx_lock, flags);
    list_del(&iocb->ki_list);
    spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}

static void aio_complete_rw(struct kiocb *kiocb, long res)
{
    struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

    if (!list_empty_careful(&iocb->ki_list))
        aio_remove_iocb(iocb);

    if (kiocb->ki_flags & IOCB_WRITE) {
        struct inode *inode = file_inode(kiocb->ki_filp);

        /*
         * Tell lockdep we inherited freeze protection from submission
         * thread.
         */
        if (S_ISREG(inode->i_mode))
            __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
        file_end_write(kiocb->ki_filp);
    }

    iocb->ki_res.res = res;
    iocb->ki_res.res2 = 0;
    iocb_put(iocb);
}

static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
{
    int ret;

    req->ki_complete = aio_complete_rw;
    req->private = NULL;
    req->ki_pos = iocb->aio_offset;
    req->ki_flags = req->ki_filp->f_iocb_flags;
    if (iocb->aio_flags & IOCB_FLAG_RESFD)
        req->ki_flags |= IOCB_EVENTFD;
    if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
        /*
         * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
         * aio_reqprio is interpreted as an I/O scheduling
         * class and priority.
         */
        ret = ioprio_check_cap(iocb->aio_reqprio);
        if (ret) {
            pr_debug("aio ioprio check cap error: %d\n", ret);
            return ret;
        }

        req->ki_ioprio = iocb->aio_reqprio;
    } else
        req->ki_ioprio = get_current_ioprio();

    ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
    if (unlikely(ret))
        return ret;

    req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
    return 0;
}

static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
        struct iovec **iovec, bool vectored, bool compat,
        struct iov_iter *iter)
{
    void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
    size_t len = iocb->aio_nbytes;

    if (!vectored) {
        ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
        *iovec = NULL;
        return ret;
    }

    return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
}

static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
{
    switch (ret) {
    case -EIOCBQUEUED:
        break;
    case -ERESTARTSYS:
    case -ERESTARTNOINTR:
    case -ERESTARTNOHAND:
    case -ERESTART_RESTARTBLOCK:
        /*
         * There's no easy way to restart the syscall since other AIO's
         * may already be running. Just fail this IO with EINTR.
         */
1534         ret = -EINTR;
1535         fallthrough;
1536     default:
1537         req->ki_complete(req, ret);
1538     }
1539 }
1540 
1541 static int aio_read(struct kiocb *req, const struct iocb *iocb,
1542             bool vectored, bool compat)
1543 {
1544     struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1545     struct iov_iter iter;
1546     struct file *file;
1547     int ret;
1548 
1549     ret = aio_prep_rw(req, iocb);
1550     if (ret)
1551         return ret;
1552     file = req->ki_filp;
1553     if (unlikely(!(file->f_mode & FMODE_READ)))
1554         return -EBADF;
1555     if (unlikely(!file->f_op->read_iter))
1556         return -EINVAL;
1557 
1558     ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1559     if (ret < 0)
1560         return ret;
1561     ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1562     if (!ret)
1563         aio_rw_done(req, call_read_iter(file, req, &iter));
1564     kfree(iovec);
1565     return ret;
1566 }
1567 
1568 static int aio_write(struct kiocb *req, const struct iocb *iocb,
1569              bool vectored, bool compat)
1570 {
1571     struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1572     struct iov_iter iter;
1573     struct file *file;
1574     int ret;
1575 
1576     ret = aio_prep_rw(req, iocb);
1577     if (ret)
1578         return ret;
1579     file = req->ki_filp;
1580 
1581     if (unlikely(!(file->f_mode & FMODE_WRITE)))
1582         return -EBADF;
1583     if (unlikely(!file->f_op->write_iter))
1584         return -EINVAL;
1585 
1586     ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1587     if (ret < 0)
1588         return ret;
1589     ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1590     if (!ret) {
1591         /*
1592          * Open-code file_start_write here to grab freeze protection,
1593          * which will be released by another thread in
1594          * aio_complete_rw().  Fool lockdep by telling it the lock got
1595          * released so that it doesn't complain about the held lock when
1596          * we return to userspace.
1597          */
1598         if (S_ISREG(file_inode(file)->i_mode)) {
1599             sb_start_write(file_inode(file)->i_sb);
1600             __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1601         }
1602         req->ki_flags |= IOCB_WRITE;
1603         aio_rw_done(req, call_write_iter(file, req, &iter));
1604     }
1605     kfree(iovec);
1606     return ret;
1607 }
1608 
1609 static void aio_fsync_work(struct work_struct *work)
1610 {
1611     struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1612     const struct cred *old_cred = override_creds(iocb->fsync.creds);
1613 
1614     iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1615     revert_creds(old_cred);
1616     put_cred(iocb->fsync.creds);
1617     iocb_put(iocb);
1618 }
1619 
1620 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1621              bool datasync)
1622 {
1623     if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1624             iocb->aio_rw_flags))
1625         return -EINVAL;
1626 
1627     if (unlikely(!req->file->f_op->fsync))
1628         return -EINVAL;
1629 
1630     req->creds = prepare_creds();
1631     if (!req->creds)
1632         return -ENOMEM;
1633 
1634     req->datasync = datasync;
1635     INIT_WORK(&req->work, aio_fsync_work);
1636     schedule_work(&req->work);
1637     return 0;
1638 }
1639 
1640 static void aio_poll_put_work(struct work_struct *work)
1641 {
1642     struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1643     struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1644 
1645     iocb_put(iocb);
1646 }
1647 
1648 /*
1649  * Safely lock the waitqueue which the request is on, synchronizing with the
1650  * case where the ->poll() provider decides to free its waitqueue early.
1651  *
1652  * Returns true on success, meaning that req->head->lock was locked, req->wait
1653  * is on req->head, and an RCU read lock was taken.  Returns false if the
1654  * request was already removed from its waitqueue (which might no longer exist).
1655  */
1656 static bool poll_iocb_lock_wq(struct poll_iocb *req)
1657 {
1658     wait_queue_head_t *head;
1659 
1660     /*
1661      * While we hold the waitqueue lock and the waitqueue is nonempty,
1662      * wake_up_pollfree() will wait for us.  However, taking the waitqueue
1663      * lock in the first place can race with the waitqueue being freed.
1664      *
1665      * We solve this as eventpoll does: by taking advantage of the fact that
1666      * all users of wake_up_pollfree() will RCU-delay the actual free.  If
1667      * we enter rcu_read_lock() and see that the pointer to the queue is
1668      * non-NULL, we can then lock it without the memory being freed out from
1669      * under us, then check whether the request is still on the queue.
1670      *
1671      * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1672      * case the caller deletes the entry from the queue, leaving it empty.
1673      * In that case, only RCU prevents the queue memory from being freed.
1674      */
1675     rcu_read_lock();
1676     head = smp_load_acquire(&req->head);
1677     if (head) {
1678         spin_lock(&head->lock);
1679         if (!list_empty(&req->wait.entry))
1680             return true;
1681         spin_unlock(&head->lock);
1682     }
1683     rcu_read_unlock();
1684     return false;
1685 }
1686 
1687 static void poll_iocb_unlock_wq(struct poll_iocb *req)
1688 {
1689     spin_unlock(&req->head->lock);
1690     rcu_read_unlock();
1691 }
1692 
1693 static void aio_poll_complete_work(struct work_struct *work)
1694 {
1695     struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1696     struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1697     struct poll_table_struct pt = { ._key = req->events };
1698     struct kioctx *ctx = iocb->ki_ctx;
1699     __poll_t mask = 0;
1700 
1701     if (!READ_ONCE(req->cancelled))
1702         mask = vfs_poll(req->file, &pt) & req->events;
1703 
1704     /*
1705      * Note that ->ki_cancel callers also delete iocb from active_reqs after
1706      * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
1707      * synchronize with them.  In the cancellation case the list_del_init
1708      * itself is not actually needed, but it is harmless, so we keep it
1709      * in to avoid further branches in the fast path.
1710      */
1711     spin_lock_irq(&ctx->ctx_lock);
1712     if (poll_iocb_lock_wq(req)) {
1713         if (!mask && !READ_ONCE(req->cancelled)) {
1714             /*
1715              * The request isn't actually ready to be completed yet.
1716              * Reschedule completion if another wakeup came in.
1717              */
1718             if (req->work_need_resched) {
1719                 schedule_work(&req->work);
1720                 req->work_need_resched = false;
1721             } else {
1722                 req->work_scheduled = false;
1723             }
1724             poll_iocb_unlock_wq(req);
1725             spin_unlock_irq(&ctx->ctx_lock);
1726             return;
1727         }
1728         list_del_init(&req->wait.entry);
1729         poll_iocb_unlock_wq(req);
1730     } /* else, POLLFREE has freed the waitqueue, so we must complete */
1731     list_del_init(&iocb->ki_list);
1732     iocb->ki_res.res = mangle_poll(mask);
1733     spin_unlock_irq(&ctx->ctx_lock);
1734 
1735     iocb_put(iocb);
1736 }
1737 
1738 /* assumes we are called with irqs disabled */
1739 static int aio_poll_cancel(struct kiocb *iocb)
1740 {
1741     struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1742     struct poll_iocb *req = &aiocb->poll;
1743 
1744     if (poll_iocb_lock_wq(req)) {
1745         WRITE_ONCE(req->cancelled, true);
1746         if (!req->work_scheduled) {
1747             schedule_work(&aiocb->poll.work);
1748             req->work_scheduled = true;
1749         }
1750         poll_iocb_unlock_wq(req);
1751     } /* else, the request was force-cancelled by POLLFREE already */
1752 
1753     return 0;
1754 }
1755 
1756 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1757         void *key)
1758 {
1759     struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1760     struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1761     __poll_t mask = key_to_poll(key);
1762     unsigned long flags;
1763 
1764     /* for instances that support it, check for an event match first: */
1765     if (mask && !(mask & req->events))
1766         return 0;
1767 
1768     /*
1769      * Complete the request inline if possible.  This requires that three
1770      * conditions be met:
1771      *   1. An event mask must have been passed.  If a plain wakeup was done
1772      *  instead, then mask == 0 and we have to call vfs_poll() to get
1773      *  the events, so inline completion isn't possible.
1774      *   2. The completion work must not have already been scheduled.
1775      *   3. ctx_lock must not be busy.  We have to use trylock because we
1776      *  already hold the waitqueue lock, so this inverts the normal
1777      *  locking order.  Use irqsave/irqrestore because not all
1778      *  filesystems (e.g. fuse) call this function with IRQs disabled,
1779      *  yet IRQs have to be disabled before ctx_lock is obtained.
1780      */
1781     if (mask && !req->work_scheduled &&
1782         spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1783         struct kioctx *ctx = iocb->ki_ctx;
1784 
1785         list_del_init(&req->wait.entry);
1786         list_del(&iocb->ki_list);
1787         iocb->ki_res.res = mangle_poll(mask);
1788         if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1789             iocb = NULL;
1790             INIT_WORK(&req->work, aio_poll_put_work);
1791             schedule_work(&req->work);
1792         }
1793         spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1794         if (iocb)
1795             iocb_put(iocb);
1796     } else {
1797         /*
1798          * Schedule the completion work if needed.  If it was already
1799          * scheduled, record that another wakeup came in.
1800          *
1801          * Don't remove the request from the waitqueue here, as it might
1802          * not actually be complete yet (we won't know until vfs_poll()
1803          * is called), and we must not miss any wakeups.  POLLFREE is an
1804          * exception to this; see below.
1805          */
1806         if (req->work_scheduled) {
1807             req->work_need_resched = true;
1808         } else {
1809             schedule_work(&req->work);
1810             req->work_scheduled = true;
1811         }
1812 
1813         /*
1814          * If the waitqueue is being freed early but we can't complete
1815          * the request inline, we have to tear down the request as best
1816          * we can.  That means immediately removing the request from its
1817          * waitqueue and preventing all further accesses to the
1818          * waitqueue via the request.  We also need to schedule the
1819          * completion work (done above).  Also mark the request as
1820          * cancelled, to potentially skip an unneeded call to ->poll().
1821          */
1822         if (mask & POLLFREE) {
1823             WRITE_ONCE(req->cancelled, true);
1824             list_del_init(&req->wait.entry);
1825 
1826             /*
1827              * Careful: this *must* be the last step, since as soon
1828              * as req->head is NULL'ed out, the request can be
1829              * completed and freed, since aio_poll_complete_work()
1830              * will no longer need to take the waitqueue lock.
1831              */
1832             smp_store_release(&req->head, NULL);
1833         }
1834     }
1835     return 1;
1836 }
1837 
1838 struct aio_poll_table {
1839     struct poll_table_struct    pt;
1840     struct aio_kiocb        *iocb;
1841     bool                queued;
1842     int             error;
1843 };
1844 
1845 static void
1846 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1847         struct poll_table_struct *p)
1848 {
1849     struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1850 
1851     /* multiple wait queues per file are not supported */
1852     if (unlikely(pt->queued)) {
1853         pt->error = -EINVAL;
1854         return;
1855     }
1856 
1857     pt->queued = true;
1858     pt->error = 0;
1859     pt->iocb->poll.head = head;
1860     add_wait_queue(head, &pt->iocb->poll.wait);
1861 }
1862 
1863 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1864 {
1865     struct kioctx *ctx = aiocb->ki_ctx;
1866     struct poll_iocb *req = &aiocb->poll;
1867     struct aio_poll_table apt;
1868     bool cancel = false;
1869     __poll_t mask;
1870 
1871     /* reject any unknown events outside the normal event mask. */
1872     if ((u16)iocb->aio_buf != iocb->aio_buf)
1873         return -EINVAL;
1874     /* reject fields that are not defined for poll */
1875     if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1876         return -EINVAL;
1877 
1878     INIT_WORK(&req->work, aio_poll_complete_work);
1879     req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1880 
1881     req->head = NULL;
1882     req->cancelled = false;
1883     req->work_scheduled = false;
1884     req->work_need_resched = false;
1885 
1886     apt.pt._qproc = aio_poll_queue_proc;
1887     apt.pt._key = req->events;
1888     apt.iocb = aiocb;
1889     apt.queued = false;
1890     apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1891 
1892     /* initialize the list so that we can do list_empty checks */
1893     INIT_LIST_HEAD(&req->wait.entry);
1894     init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1895 
1896     mask = vfs_poll(req->file, &apt.pt) & req->events;
1897     spin_lock_irq(&ctx->ctx_lock);
1898     if (likely(apt.queued)) {
1899         bool on_queue = poll_iocb_lock_wq(req);
1900 
1901         if (!on_queue || req->work_scheduled) {
1902             /*
1903              * aio_poll_wake() already either scheduled the async
1904              * completion work, or completed the request inline.
1905              */
1906             if (apt.error) /* unsupported case: multiple queues */
1907                 cancel = true;
1908             apt.error = 0;
1909             mask = 0;
1910         }
1911         if (mask || apt.error) {
1912             /* Steal to complete synchronously. */
1913             list_del_init(&req->wait.entry);
1914         } else if (cancel) {
1915             /* Cancel if possible (may be too late though). */
1916             WRITE_ONCE(req->cancelled, true);
1917         } else if (on_queue) {
1918             /*
1919              * Actually waiting for an event, so add the request to
1920              * active_reqs so that it can be cancelled if needed.
1921              */
1922             list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1923             aiocb->ki_cancel = aio_poll_cancel;
1924         }
1925         if (on_queue)
1926             poll_iocb_unlock_wq(req);
1927     }
1928     if (mask) { /* no async, we've stolen it */
1929         aiocb->ki_res.res = mangle_poll(mask);
1930         apt.error = 0;
1931     }
1932     spin_unlock_irq(&ctx->ctx_lock);
1933     if (mask)
1934         iocb_put(aiocb);
1935     return apt.error;
1936 }
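
/*
 * A hedged userspace sketch of IOCB_CMD_POLL, assuming raw syscall(2)
 * wrappers and an existing aio_context_t "ctx".  The requested mask goes in
 * aio_buf (only the low 16 bits are accepted, per the check above) and the
 * resulting mask comes back in the io_event's res field; the poll is
 * one-shot:
 *
 *     #include <poll.h>
 *
 *     struct iocb cb = { 0 };
 *     struct iocb *cbs[1] = { &cb };
 *     struct io_event ev;
 *
 *     cb.aio_fildes = fd;                      "fd" is illustrative
 *     cb.aio_lio_opcode = IOCB_CMD_POLL;
 *     cb.aio_buf = POLLIN;                     wait for readability
 *     syscall(SYS_io_submit, ctx, 1, cbs);
 *     syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
 *     if (ev.res & POLLIN)
 *             ... fd is readable ...
 */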
1937 
1938 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1939                struct iocb __user *user_iocb, struct aio_kiocb *req,
1940                bool compat)
1941 {
1942     req->ki_filp = fget(iocb->aio_fildes);
1943     if (unlikely(!req->ki_filp))
1944         return -EBADF;
1945 
1946     if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1947         struct eventfd_ctx *eventfd;
1948         /*
1949          * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1950          * instance of the file* now. The file descriptor must be
1951          * an eventfd() fd, and will be signaled for each completed
1952          * event using the eventfd_signal() function.
1953          */
1954         eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1955         if (IS_ERR(eventfd))
1956             return PTR_ERR(eventfd);
1957 
1958         req->ki_eventfd = eventfd;
1959     }
1960 
1961     if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1962         pr_debug("EFAULT: aio_key\n");
1963         return -EFAULT;
1964     }
1965 
1966     req->ki_res.obj = (u64)(unsigned long)user_iocb;
1967     req->ki_res.data = iocb->aio_data;
1968     req->ki_res.res = 0;
1969     req->ki_res.res2 = 0;
1970 
1971     switch (iocb->aio_lio_opcode) {
1972     case IOCB_CMD_PREAD:
1973         return aio_read(&req->rw, iocb, false, compat);
1974     case IOCB_CMD_PWRITE:
1975         return aio_write(&req->rw, iocb, false, compat);
1976     case IOCB_CMD_PREADV:
1977         return aio_read(&req->rw, iocb, true, compat);
1978     case IOCB_CMD_PWRITEV:
1979         return aio_write(&req->rw, iocb, true, compat);
1980     case IOCB_CMD_FSYNC:
1981         return aio_fsync(&req->fsync, iocb, false);
1982     case IOCB_CMD_FDSYNC:
1983         return aio_fsync(&req->fsync, iocb, true);
1984     case IOCB_CMD_POLL:
1985         return aio_poll(req, iocb);
1986     default:
1987         pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1988         return -EINVAL;
1989     }
1990 }
1991 
1992 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1993              bool compat)
1994 {
1995     struct aio_kiocb *req;
1996     struct iocb iocb;
1997     int err;
1998 
1999     if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2000         return -EFAULT;
2001 
2002     /* enforce forwards compatibility on users */
2003     if (unlikely(iocb.aio_reserved2)) {
2004         pr_debug("EINVAL: reserved field set\n");
2005         return -EINVAL;
2006     }
2007 
2008     /* prevent overflows */
2009     if (unlikely(
2010         (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2011         (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2012         ((ssize_t)iocb.aio_nbytes < 0)
2013        )) {
2014         pr_debug("EINVAL: overflow check\n");
2015         return -EINVAL;
2016     }
2017 
2018     req = aio_get_req(ctx);
2019     if (unlikely(!req))
2020         return -EAGAIN;
2021 
2022     err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2023 
2024     /* Done with the synchronous reference */
2025     iocb_put(req);
2026 
2027     /*
2028      * If err is 0, we've either done aio_complete() ourselves or have
2029      * arranged for that to be done asynchronously.  Anything non-zero
2030      * means that we need to destroy req ourselves.
2031      */
2032     if (unlikely(err)) {
2033         iocb_destroy(req);
2034         put_reqs_available(ctx, 1);
2035     }
2036     return err;
2037 }
2038 
2039 /* sys_io_submit:
2040  *  Queue the nr iocbs pointed to by iocbpp for processing.  Returns
2041  *  the number of iocbs queued.  May return -EINVAL if the aio_context
2042  *  specified by ctx_id is invalid, if nr is < 0, if the iocb at
2043  *  *iocbpp[0] is not properly initialized, or if the operation
2044  *  specified is invalid for the file descriptor in the iocb.  May fail
2045  *  with -EFAULT if any of the data structures point to invalid data,
2046  *  with -EBADF if the file descriptor specified in the first iocb is
2047  *  invalid, or with -EAGAIN if insufficient resources are available
2048  *  to queue any iocbs.  Will return 0 if nr is 0.  Will fail with
2049  *  -ENOSYS if not implemented.  (A usage sketch follows the function.)
2050  */
2051 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2052         struct iocb __user * __user *, iocbpp)
2053 {
2054     struct kioctx *ctx;
2055     long ret = 0;
2056     int i = 0;
2057     struct blk_plug plug;
2058 
2059     if (unlikely(nr < 0))
2060         return -EINVAL;
2061 
2062     ctx = lookup_ioctx(ctx_id);
2063     if (unlikely(!ctx)) {
2064         pr_debug("EINVAL: invalid context id\n");
2065         return -EINVAL;
2066     }
2067 
2068     if (nr > ctx->nr_events)
2069         nr = ctx->nr_events;
2070 
2071     if (nr > AIO_PLUG_THRESHOLD)
2072         blk_start_plug(&plug);
2073     for (i = 0; i < nr; i++) {
2074         struct iocb __user *user_iocb;
2075 
2076         if (unlikely(get_user(user_iocb, iocbpp + i))) {
2077             ret = -EFAULT;
2078             break;
2079         }
2080 
2081         ret = io_submit_one(ctx, user_iocb, false);
2082         if (ret)
2083             break;
2084     }
2085     if (nr > AIO_PLUG_THRESHOLD)
2086         blk_finish_plug(&plug);
2087 
2088     percpu_ref_put(&ctx->users);
2089     return i ? i : ret;
2090 }
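
/*
 * A minimal userspace round trip through the submission path above,
 * assuming raw syscall(2) wrappers rather than libaio; error handling is
 * elided and "fd" is an illustrative, already-open file descriptor:
 *
 *     #include <linux/aio_abi.h>
 *     #include <sys/syscall.h>
 *     #include <unistd.h>
 *     #include <string.h>
 *
 *     int read_async(int fd, void *buf, size_t len)
 *     {
 *             aio_context_t ctx = 0;
 *             struct iocb cb, *cbs[1] = { &cb };
 *             struct io_event ev;
 *
 *             syscall(SYS_io_setup, 128, &ctx);        ring for 128 events
 *
 *             memset(&cb, 0, sizeof(cb));
 *             cb.aio_fildes = fd;
 *             cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *             cb.aio_buf = (__u64)(unsigned long)buf;
 *             cb.aio_nbytes = len;
 *             cb.aio_offset = 0;
 *
 *             syscall(SYS_io_submit, ctx, 1, cbs);     returns 1 on success
 *             syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
 *             syscall(SYS_io_destroy, ctx);
 *             return (int)ev.res;                      bytes read or -errno
 *     }
 */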
2091 
2092 #ifdef CONFIG_COMPAT
2093 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2094                int, nr, compat_uptr_t __user *, iocbpp)
2095 {
2096     struct kioctx *ctx;
2097     long ret = 0;
2098     int i = 0;
2099     struct blk_plug plug;
2100 
2101     if (unlikely(nr < 0))
2102         return -EINVAL;
2103 
2104     ctx = lookup_ioctx(ctx_id);
2105     if (unlikely(!ctx)) {
2106         pr_debug("EINVAL: invalid context id\n");
2107         return -EINVAL;
2108     }
2109 
2110     if (nr > ctx->nr_events)
2111         nr = ctx->nr_events;
2112 
2113     if (nr > AIO_PLUG_THRESHOLD)
2114         blk_start_plug(&plug);
2115     for (i = 0; i < nr; i++) {
2116         compat_uptr_t user_iocb;
2117 
2118         if (unlikely(get_user(user_iocb, iocbpp + i))) {
2119             ret = -EFAULT;
2120             break;
2121         }
2122 
2123         ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2124         if (ret)
2125             break;
2126     }
2127     if (nr > AIO_PLUG_THRESHOLD)
2128         blk_finish_plug(&plug);
2129 
2130     percpu_ref_put(&ctx->users);
2131     return i ? i : ret;
2132 }
2133 #endif
2134 
2135 /* sys_io_cancel:
2136  *  Attempts to cancel an iocb previously passed to io_submit().  The
2137  *  result argument is historical and no longer written to: the
2138  *  completion event is always delivered via the ring buffer, and a
2139  *  successfully issued cancellation returns -EINPROGRESS rather than
2140  *  0.  May fail with -EFAULT if any of the data structures pointed
2141  *  to are invalid, or with -EINVAL if the aio_context specified by
2142  *  ctx_id is invalid or the iocb is not found on the context's list
2143  *  of active requests.  Will fail with -ENOSYS if not implemented.
2144  */
2145 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2146         struct io_event __user *, result)
2147 {
2148     struct kioctx *ctx;
2149     struct aio_kiocb *kiocb;
2150     int ret = -EINVAL;
2151     u32 key;
2152     u64 obj = (u64)(unsigned long)iocb;
2153 
2154     if (unlikely(get_user(key, &iocb->aio_key)))
2155         return -EFAULT;
2156     if (unlikely(key != KIOCB_KEY))
2157         return -EINVAL;
2158 
2159     ctx = lookup_ioctx(ctx_id);
2160     if (unlikely(!ctx))
2161         return -EINVAL;
2162 
2163     spin_lock_irq(&ctx->ctx_lock);
2164     /* TODO: use a hash or array, this sucks. */
2165     list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2166         if (kiocb->ki_res.obj == obj) {
2167             ret = kiocb->ki_cancel(&kiocb->rw);
2168             list_del_init(&kiocb->ki_list);
2169             break;
2170         }
2171     }
2172     spin_unlock_irq(&ctx->ctx_lock);
2173 
2174     if (!ret) {
2175         /*
2176          * The result argument is no longer used - the io_event is
2177          * always delivered via the ring buffer. -EINPROGRESS indicates
2178          * cancellation is in progress:
2179          */
2180         ret = -EINPROGRESS;
2181     }
2182 
2183     percpu_ref_put(&ctx->users);
2184 
2185     return ret;
2186 }
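
/*
 * A hedged userspace sketch of io_cancel(2), assuming "cb" is the very
 * struct iocb previously passed to io_submit() (the lookup above matches on
 * its user address) and is still in flight.  As the code above shows, a
 * successfully issued cancellation returns -EINPROGRESS and "result" is
 * left untouched; the completion event is reaped from the ring as usual:
 *
 *     #include <errno.h>
 *
 *     struct io_event result;
 *
 *     if (syscall(SYS_io_cancel, ctx, &cb, &result) < 0 &&
 *         errno == EINPROGRESS)
 *             ... cancellation initiated; reap the event via io_getevents ...
 */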
2187 
2188 static long do_io_getevents(aio_context_t ctx_id,
2189         long min_nr,
2190         long nr,
2191         struct io_event __user *events,
2192         struct timespec64 *ts)
2193 {
2194     ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2195     struct kioctx *ioctx = lookup_ioctx(ctx_id);
2196     long ret = -EINVAL;
2197 
2198     if (likely(ioctx)) {
2199         if (likely(min_nr <= nr && min_nr >= 0))
2200             ret = read_events(ioctx, min_nr, nr, events, until);
2201         percpu_ref_put(&ioctx->users);
2202     }
2203 
2204     return ret;
2205 }
2206 
2207 /* io_getevents:
2208  *  Attempts to read at least min_nr events and up to nr events from
2209  *  the completion queue for the aio_context specified by ctx_id.  If
2210  *  it succeeds, the number of events read is returned.  May fail with
2211  *  -EINVAL if ctx_id is invalid, or if min_nr, nr, or timeout is out
2212  *  of range.  May fail with -EFAULT if any of the memory specified is
2213  *  invalid.  May return 0, or fewer than min_nr events, if the
2214  *  timeout specified by timeout has elapsed before sufficient events
2215  *  are available; timeout == NULL specifies an infinite timeout.
2216  *  Note that the timeout pointed to by timeout is relative.  Will
2217  *  fail with -ENOSYS if not implemented.
2218  */
2219 #ifdef CONFIG_64BIT
2220 
2221 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2222         long, min_nr,
2223         long, nr,
2224         struct io_event __user *, events,
2225         struct __kernel_timespec __user *, timeout)
2226 {
2227     struct timespec64   ts;
2228     int         ret;
2229 
2230     if (timeout && unlikely(get_timespec64(&ts, timeout)))
2231         return -EFAULT;
2232 
2233     ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2234     if (!ret && signal_pending(current))
2235         ret = -EINTR;
2236     return ret;
2237 }
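
/*
 * As the comment above notes, the timeout is relative.  A hedged sketch
 * waiting up to 500ms for at least one event, assuming an existing
 * aio_context_t "ctx" and a 64-bit libc whose struct timespec matches
 * struct __kernel_timespec:
 *
 *     struct io_event ev;
 *     struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
 *
 *     int n = syscall(SYS_io_getevents, ctx, 1, 1, &ev, &ts);
 *     n == 1 means one event was reaped; n == 0 means the timeout elapsed
 */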
2238 
2239 #endif
2240 
2241 struct __aio_sigset {
2242     const sigset_t __user   *sigmask;
2243     size_t      sigsetsize;
2244 };
2245 
2246 SYSCALL_DEFINE6(io_pgetevents,
2247         aio_context_t, ctx_id,
2248         long, min_nr,
2249         long, nr,
2250         struct io_event __user *, events,
2251         struct __kernel_timespec __user *, timeout,
2252         const struct __aio_sigset __user *, usig)
2253 {
2254     struct __aio_sigset ksig = { NULL, };
2255     struct timespec64   ts;
2256     bool interrupted;
2257     int ret;
2258 
2259     if (timeout && unlikely(get_timespec64(&ts, timeout)))
2260         return -EFAULT;
2261 
2262     if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2263         return -EFAULT;
2264 
2265     ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2266     if (ret)
2267         return ret;
2268 
2269     ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2270 
2271     interrupted = signal_pending(current);
2272     restore_saved_sigmask_unless(interrupted);
2273     if (interrupted && !ret)
2274         ret = -ERESTARTNOHAND;
2275 
2276     return ret;
2277 }
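
/*
 * A hedged sketch of the sigmask plumbing above: like pselect(2),
 * io_pgetevents() atomically swaps in a signal mask for the duration of
 * the wait.  This assumes the raw __aio_sigset layout is passed directly
 * (i.e. no libc wrapper) and that the kernel's sigset is 8 bytes, as on
 * x86_64; "ctx" and "ev" are as in the earlier sketches:
 *
 *     #include <signal.h>
 *
 *     struct {
 *             const sigset_t *sigmask;
 *             size_t sigsetsize;
 *     } usig;
 *     sigset_t mask;
 *
 *     sigfillset(&mask);
 *     sigdelset(&mask, SIGINT);        only SIGINT may interrupt the wait
 *     usig.sigmask = &mask;
 *     usig.sigsetsize = 8;             kernel sigset size, not sizeof(sigset_t)
 *     syscall(SYS_io_pgetevents, ctx, 1, 1, &ev, NULL, &usig);
 */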
2278 
2279 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2280 
2281 SYSCALL_DEFINE6(io_pgetevents_time32,
2282         aio_context_t, ctx_id,
2283         long, min_nr,
2284         long, nr,
2285         struct io_event __user *, events,
2286         struct old_timespec32 __user *, timeout,
2287         const struct __aio_sigset __user *, usig)
2288 {
2289     struct __aio_sigset ksig = { NULL, };
2290     struct timespec64   ts;
2291     bool interrupted;
2292     int ret;
2293 
2294     if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2295         return -EFAULT;
2296 
2297     if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2298         return -EFAULT;
2299 
2300 
2301     ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2302     if (ret)
2303         return ret;
2304 
2305     ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2306 
2307     interrupted = signal_pending(current);
2308     restore_saved_sigmask_unless(interrupted);
2309     if (interrupted && !ret)
2310         ret = -ERESTARTNOHAND;
2311 
2312     return ret;
2313 }
2314 
2315 #endif
2316 
2317 #if defined(CONFIG_COMPAT_32BIT_TIME)
2318 
2319 SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2320         __s32, min_nr,
2321         __s32, nr,
2322         struct io_event __user *, events,
2323         struct old_timespec32 __user *, timeout)
2324 {
2325     struct timespec64 t;
2326     int ret;
2327 
2328     if (timeout && get_old_timespec32(&t, timeout))
2329         return -EFAULT;
2330 
2331     ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2332     if (!ret && signal_pending(current))
2333         ret = -EINTR;
2334     return ret;
2335 }
2336 
2337 #endif
2338 
2339 #ifdef CONFIG_COMPAT
2340 
2341 struct __compat_aio_sigset {
2342     compat_uptr_t       sigmask;
2343     compat_size_t       sigsetsize;
2344 };
2345 
2346 #if defined(CONFIG_COMPAT_32BIT_TIME)
2347 
2348 COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2349         compat_aio_context_t, ctx_id,
2350         compat_long_t, min_nr,
2351         compat_long_t, nr,
2352         struct io_event __user *, events,
2353         struct old_timespec32 __user *, timeout,
2354         const struct __compat_aio_sigset __user *, usig)
2355 {
2356     struct __compat_aio_sigset ksig = { 0, };
2357     struct timespec64 t;
2358     bool interrupted;
2359     int ret;
2360 
2361     if (timeout && get_old_timespec32(&t, timeout))
2362         return -EFAULT;
2363 
2364     if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2365         return -EFAULT;
2366 
2367     ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2368     if (ret)
2369         return ret;
2370 
2371     ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2372 
2373     interrupted = signal_pending(current);
2374     restore_saved_sigmask_unless(interrupted);
2375     if (interrupted && !ret)
2376         ret = -ERESTARTNOHAND;
2377 
2378     return ret;
2379 }
2380 
2381 #endif
2382 
2383 COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2384         compat_aio_context_t, ctx_id,
2385         compat_long_t, min_nr,
2386         compat_long_t, nr,
2387         struct io_event __user *, events,
2388         struct __kernel_timespec __user *, timeout,
2389         const struct __compat_aio_sigset __user *, usig)
2390 {
2391     struct __compat_aio_sigset ksig = { 0, };
2392     struct timespec64 t;
2393     bool interrupted;
2394     int ret;
2395 
2396     if (timeout && get_timespec64(&t, timeout))
2397         return -EFAULT;
2398 
2399     if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2400         return -EFAULT;
2401 
2402     ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2403     if (ret)
2404         return ret;
2405 
2406     ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2407 
2408     interrupted = signal_pending(current);
2409     restore_saved_sigmask_unless(interrupted);
2410     if (interrupted && !ret)
2411         ret = -ERESTARTNOHAND;
2412 
2413     return ret;
2414 }
2415 #endif