Back to home page

OSCL-LXR

 
 

    


0001 #include <sys/types.h>
0002 #include <sys/stat.h>
0003 #include <sys/mman.h>
0004 #include <unistd.h>
0005 #include <errno.h>
0006 #include <string.h>
0007 
0008 #include "liburing.h"
0009 
0010 static int io_uring_mmap(int fd, struct io_uring_params *p,
0011              struct io_uring_sq *sq, struct io_uring_cq *cq)
0012 {
0013     size_t size;
0014     void *ptr;
0015     int ret;
0016 
0017     sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
0018     ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
0019             MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
0020     if (ptr == MAP_FAILED)
0021         return -errno;
0022     sq->khead = ptr + p->sq_off.head;
0023     sq->ktail = ptr + p->sq_off.tail;
0024     sq->kring_mask = ptr + p->sq_off.ring_mask;
0025     sq->kring_entries = ptr + p->sq_off.ring_entries;
0026     sq->kflags = ptr + p->sq_off.flags;
0027     sq->kdropped = ptr + p->sq_off.dropped;
0028     sq->array = ptr + p->sq_off.array;
0029 
0030     size = p->sq_entries * sizeof(struct io_uring_sqe);
0031     sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
0032                 MAP_SHARED | MAP_POPULATE, fd,
0033                 IORING_OFF_SQES);
0034     if (sq->sqes == MAP_FAILED) {
0035         ret = -errno;
0036 err:
0037         munmap(sq->khead, sq->ring_sz);
0038         return ret;
0039     }
0040 
0041     cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
0042     ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
0043             MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
0044     if (ptr == MAP_FAILED) {
0045         ret = -errno;
0046         munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
0047         goto err;
0048     }
0049     cq->khead = ptr + p->cq_off.head;
0050     cq->ktail = ptr + p->cq_off.tail;
0051     cq->kring_mask = ptr + p->cq_off.ring_mask;
0052     cq->kring_entries = ptr + p->cq_off.ring_entries;
0053     cq->koverflow = ptr + p->cq_off.overflow;
0054     cq->cqes = ptr + p->cq_off.cqes;
0055     return 0;
0056 }
0057 
/*
 * For users that want to specify sq_thread_cpu or sq_thread_idle, this
 * interface is a convenient helper for mmap()ing the rings.
 * Returns zero on success, or a negative error value (-errno) on
 * failure.  On success, 'ring' contains the necessary information
 * to read/write to the rings.
 */
0064 int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
0065 {
0066     int ret;
0067 
0068     memset(ring, 0, sizeof(*ring));
0069     ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
0070     if (!ret)
0071         ring->ring_fd = fd;
0072     return ret;
0073 }
0074 
/*
 * Returns zero on success, or a negative value on error.  On success,
 * 'ring' contains the necessary information to read/write to the rings.
 */
0079 int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
0080 {
0081     struct io_uring_params p;
0082     int fd, ret;
0083 
0084     memset(&p, 0, sizeof(p));
0085     p.flags = flags;
0086 
0087     fd = io_uring_setup(entries, &p);
0088     if (fd < 0)
0089         return fd;
0090 
0091     ret = io_uring_queue_mmap(fd, &p, ring);
0092     if (ret)
0093         close(fd);
0094 
0095     return ret;
0096 }
0097 
0098 void io_uring_queue_exit(struct io_uring *ring)
0099 {
0100     struct io_uring_sq *sq = &ring->sq;
0101     struct io_uring_cq *cq = &ring->cq;
0102 
0103     munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
0104     munmap(sq->khead, sq->ring_sz);
0105     munmap(cq->khead, cq->ring_sz);
0106     close(ring->ring_fd);
0107 }