#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include <linux/swab.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
    unsigned *khead;
    unsigned *ktail;
    unsigned *kring_mask;
    unsigned *kring_entries;
    unsigned *kflags;
    unsigned *kdropped;
    unsigned *array;
    struct io_uring_sqe *sqes;

    unsigned sqe_head;
    unsigned sqe_tail;

    size_t ring_sz;
};

struct io_uring_cq {
    unsigned *khead;
    unsigned *ktail;
    unsigned *kring_mask;
    unsigned *kring_entries;
    unsigned *koverflow;
    struct io_uring_cqe *cqes;

    size_t ring_sz;
};

struct io_uring {
    struct io_uring_sq sq;
    struct io_uring_cq cq;
    int ring_fd;
};

/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
    unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
    unsigned int nr_args);

/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
    unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
    struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
    struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
    struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
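
/*
 * Example (a minimal sketch, not part of the original header): the
 * typical submit/complete cycle built from the declarations above.
 * QUEUE_DEPTH is a hypothetical application constant and error
 * handling is elided.
 *
 *    struct io_uring ring;
 *    struct io_uring_sqe *sqe;
 *    struct io_uring_cqe *cqe;
 *
 *    io_uring_queue_init(QUEUE_DEPTH, &ring, 0);
 *    sqe = io_uring_get_sqe(&ring);
 *    io_uring_prep_nop(sqe);
 *    io_uring_submit(&ring);
 *    io_uring_wait_cqe(&ring, &cqe);
 *    io_uring_cqe_seen(&ring, cqe);
 *    io_uring_queue_exit(&ring);
 */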

/*
 * Must be called after the application has finished processing a cqe
 * obtained from io_uring_{peek,wait}_cqe(), so that the kernel can
 * reuse the completion queue entry.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
                     struct io_uring_cqe *cqe)
{
    if (cqe) {
        struct io_uring_cq *cq = &ring->cq;

        (*cq->khead)++;
        /*
         * Ensure that the kernel sees our new head; the kernel has
         * the matching read barrier.
         */
        write_barrier();
    }
}
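
/*
 * Example (sketch, continuing the one above; handle_completion() is a
 * hypothetical application callback): draining every completion that
 * is currently available without blocking. io_uring_peek_cqe() may
 * leave *cqe_ptr NULL when the queue is empty, so both are checked.
 *
 *    while (io_uring_peek_cqe(&ring, &cqe) == 0 && cqe) {
 *        handle_completion(cqe);
 *        io_uring_cqe_seen(&ring, cqe);
 *    }
 */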

/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
    sqe->user_data = (uintptr_t) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
    return (void *) (uintptr_t) cqe->user_data;
}
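
/*
 * Example (sketch; struct my_request is a hypothetical per-request
 * structure owned by the application): user_data set on the sqe is
 * handed back untouched in the matching cqe.
 *
 *    io_uring_sqe_set_data(sqe, req);
 *    ...
 *    struct my_request *done = io_uring_cqe_get_data(cqe);
 */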

static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
                    const void *addr, unsigned len,
                    off_t offset)
{
    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = op;
    sqe->fd = fd;
    sqe->off = offset;
    sqe->addr = (unsigned long) addr;
    sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
                       const struct iovec *iovecs,
                       unsigned nr_vecs, off_t offset)
{
    io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}
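
/*
 * Example (sketch; buf and BUF_SIZE are hypothetical): queueing a
 * single-vector read from offset 0. buf must remain valid until the
 * request completes.
 *
 *    struct iovec iov = { .iov_base = buf, .iov_len = BUF_SIZE };
 *
 *    io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 */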

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
                        void *buf, unsigned nbytes,
                        off_t offset)
{
    io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
                    const struct iovec *iovecs,
                    unsigned nr_vecs, off_t offset)
{
    io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
                         const void *buf, unsigned nbytes,
                         off_t offset)
{
    io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
                      unsigned poll_mask)
{
    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_POLL_ADD;
    sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
    poll_mask = __swahw32(poll_mask);
#endif
    sqe->poll_events = poll_mask;
}
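
/*
 * Example (sketch; sockfd is a hypothetical connected socket): arming
 * a poll request that completes once the fd becomes readable. POLLIN
 * comes from <poll.h>.
 *
 *    io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 */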

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
                         void *user_data)
{
    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_POLL_REMOVE;
    sqe->addr = (unsigned long) user_data;
}

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
                       unsigned fsync_flags)
{
    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_FSYNC;
    sqe->fd = fd;
    sqe->fsync_flags = fsync_flags;
}

static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_NOP;
}

#ifdef __cplusplus
}
#endif

#endif /* LIB_URING_H */