/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/time_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
    __u8    opcode;     /* type of operation for this sqe */
    __u8    flags;      /* IOSQE_ flags */
    __u16   ioprio;     /* ioprio for the request */
    __s32   fd;     /* file descriptor to do IO on */
    union {
        __u64   off;    /* offset into file */
        __u64   addr2;
        struct {
            __u32   cmd_op;
            __u32   __pad1;
        };
    };
    union {
        __u64   addr;   /* pointer to buffer or iovecs */
        __u64   splice_off_in;
    };
    __u32   len;        /* buffer size or number of iovecs */
    union {
        __kernel_rwf_t  rw_flags;
        __u32       fsync_flags;
        __u16       poll_events;    /* compatibility */
        __u32       poll32_events;  /* word-reversed for BE */
        __u32       sync_range_flags;
        __u32       msg_flags;
        __u32       timeout_flags;
        __u32       accept_flags;
        __u32       cancel_flags;
        __u32       open_flags;
        __u32       statx_flags;
        __u32       fadvise_advice;
        __u32       splice_flags;
        __u32       rename_flags;
        __u32       unlink_flags;
        __u32       hardlink_flags;
        __u32       xattr_flags;
        __u32       msg_ring_flags;
    };
    __u64   user_data;  /* data to be passed back at completion time */
    /* pack this to avoid bogus arm OABI complaints */
    union {
        /* index into fixed buffers, if used */
        __u16   buf_index;
        /* for grouped buffer selection */
        __u16   buf_group;
    } __attribute__((packed));
    /* personality to use, if used */
    __u16   personality;
    union {
        __s32   splice_fd_in;
        __u32   file_index;
        struct {
            __u16   addr_len;
            __u16   __pad3[1];
        };
    };
    union {
        struct {
            __u64   addr3;
            __u64   __pad2[1];
        };
        /*
         * If the ring is initialized with IORING_SETUP_SQE128, then
         * this field is used for 80 bytes of arbitrary command data
         */
        __u8    cmd[0];
    };
};
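
/*
 * Illustrative sketch (not part of the UAPI): filling in an SQE for a plain
 * read. It assumes the application has already mapped the SQE array and
 * picked a free entry; 'sqe', 'fd', 'buf' and 'len' are application-side
 * names, not defined by this header.
 *
 *      memset(sqe, 0, sizeof(*sqe));
 *      sqe->opcode    = IORING_OP_READ;
 *      sqe->fd        = fd;
 *      sqe->addr      = (unsigned long) buf;
 *      sqe->len       = len;
 *      sqe->off       = 0;
 *      sqe->user_data = 0xcafe;
 *
 * user_data is opaque to the kernel and comes back unchanged in the CQE.
 */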

/*
 * If sqe->file_index is set to this for opcodes that instantiate a new
 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
 * an available direct descriptor instead of having the application pass one
 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
 * if the space is full.
 */
#define IORING_FILE_INDEX_ALLOC     (~0U)

enum {
    IOSQE_FIXED_FILE_BIT,
    IOSQE_IO_DRAIN_BIT,
    IOSQE_IO_LINK_BIT,
    IOSQE_IO_HARDLINK_BIT,
    IOSQE_ASYNC_BIT,
    IOSQE_BUFFER_SELECT_BIT,
    IOSQE_CQE_SKIP_SUCCESS_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE    (1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN      (1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK       (1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK   (1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC     (1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
/* don't post CQE if request succeeded */
#define IOSQE_CQE_SKIP_SUCCESS  (1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
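
/*
 * Illustrative sketch: chaining a write and an fsync with IOSQE_IO_LINK so
 * the fsync is only issued once the write completes. 'write_sqe' and
 * 'fsync_sqe' are assumed to be two consecutive free SQEs obtained by the
 * application; IORING_FSYNC_DATASYNC is defined further below.
 *
 *      write_sqe->opcode = IORING_OP_WRITE;
 *      write_sqe->flags |= IOSQE_IO_LINK;
 *
 *      fsync_sqe->opcode      = IORING_OP_FSYNC;
 *      fsync_sqe->fsync_flags = IORING_FSYNC_DATASYNC;
 *
 * If the write fails, the linked fsync completes with -ECANCELED;
 * IOSQE_IO_HARDLINK keeps the chain intact across errors.
 */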

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL (1U << 0)   /* io_context is polled */
#define IORING_SETUP_SQPOLL (1U << 1)   /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2)   /* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE (1U << 3)   /* app defines CQ size */
#define IORING_SETUP_CLAMP  (1U << 4)   /* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ  (1U << 5)   /* attach to existing wq */
#define IORING_SETUP_R_DISABLED (1U << 6)   /* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL (1U << 7)   /* continue submit on error */
/*
 * Cooperative task running. When requests complete, they often require
 * forcing the submitter to transition to the kernel to complete. If this
 * flag is set, work will be done when the task transitions anyway, rather
 * than force an inter-processor interrupt reschedule. This avoids interrupting
 * a task running in userspace, and saves an IPI.
 */
#define IORING_SETUP_COOP_TASKRUN   (1U << 8)
/*
 * If COOP_TASKRUN is set, get notified if task work is available for
 * running and a kernel transition would be needed to run it. This sets
 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
 */
#define IORING_SETUP_TASKRUN_FLAG   (1U << 9)
#define IORING_SETUP_SQE128     (1U << 10) /* SQEs are 128 byte */
#define IORING_SETUP_CQE32      (1U << 11) /* CQEs are 32 byte */
/*
 * Only one task is allowed to submit requests
 */
#define IORING_SETUP_SINGLE_ISSUER  (1U << 12)
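
/*
 * Illustrative sketch: one possible flag combination passed to
 * io_uring_setup(2). 'p' is an application-side struct io_uring_params
 * (defined further below), zeroed before use.
 *
 *      p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF |
 *                IORING_SETUP_CQSIZE;
 *      p.sq_thread_cpu  = 1;
 *      p.sq_thread_idle = 2000;
 *      p.cq_entries     = 4096;
 */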

enum io_uring_op {
    IORING_OP_NOP,
    IORING_OP_READV,
    IORING_OP_WRITEV,
    IORING_OP_FSYNC,
    IORING_OP_READ_FIXED,
    IORING_OP_WRITE_FIXED,
    IORING_OP_POLL_ADD,
    IORING_OP_POLL_REMOVE,
    IORING_OP_SYNC_FILE_RANGE,
    IORING_OP_SENDMSG,
    IORING_OP_RECVMSG,
    IORING_OP_TIMEOUT,
    IORING_OP_TIMEOUT_REMOVE,
    IORING_OP_ACCEPT,
    IORING_OP_ASYNC_CANCEL,
    IORING_OP_LINK_TIMEOUT,
    IORING_OP_CONNECT,
    IORING_OP_FALLOCATE,
    IORING_OP_OPENAT,
    IORING_OP_CLOSE,
    IORING_OP_FILES_UPDATE,
    IORING_OP_STATX,
    IORING_OP_READ,
    IORING_OP_WRITE,
    IORING_OP_FADVISE,
    IORING_OP_MADVISE,
    IORING_OP_SEND,
    IORING_OP_RECV,
    IORING_OP_OPENAT2,
    IORING_OP_EPOLL_CTL,
    IORING_OP_SPLICE,
    IORING_OP_PROVIDE_BUFFERS,
    IORING_OP_REMOVE_BUFFERS,
    IORING_OP_TEE,
    IORING_OP_SHUTDOWN,
    IORING_OP_RENAMEAT,
    IORING_OP_UNLINKAT,
    IORING_OP_MKDIRAT,
    IORING_OP_SYMLINKAT,
    IORING_OP_LINKAT,
    IORING_OP_MSG_RING,
    IORING_OP_FSETXATTR,
    IORING_OP_SETXATTR,
    IORING_OP_FGETXATTR,
    IORING_OP_GETXATTR,
    IORING_OP_SOCKET,
    IORING_OP_URING_CMD,
    IORING_OP_SEND_ZC,

    /* this goes last, obviously */
    IORING_OP_LAST,
};

/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC   (1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS      (1U << 0)
#define IORING_TIMEOUT_UPDATE       (1U << 1)
#define IORING_TIMEOUT_BOOTTIME     (1U << 2)
#define IORING_TIMEOUT_REALTIME     (1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE  (1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS    (1U << 5)
#define IORING_TIMEOUT_CLOCK_MASK   (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK  (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
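
/*
 * Illustrative sketch: a relative timeout request. The field conventions
 * below (addr pointing at a struct __kernel_timespec, len set to 1 and off
 * holding the completion count to wait for) follow what liburing's
 * io_uring_prep_timeout() does; 'ts' is application-provided.
 *
 *      sqe->opcode        = IORING_OP_TIMEOUT;
 *      sqe->addr          = (unsigned long) &ts;
 *      sqe->len           = 1;
 *      sqe->off           = 0;
 *      sqe->timeout_flags = 0;
 */
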
/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED    (1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI    Multishot poll. Sets IORING_CQE_F_MORE if
 *              the poll handler will continue to report
 *              CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE       Update existing poll request, matching
 *              sqe->addr as the old user_data field.
 *
 * IORING_POLL_LEVEL        Level triggered poll.
 */
#define IORING_POLL_ADD_MULTI   (1U << 0)
#define IORING_POLL_UPDATE_EVENTS   (1U << 1)
#define IORING_POLL_UPDATE_USER_DATA    (1U << 2)
#define IORING_POLL_ADD_LEVEL       (1U << 3)
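
/*
 * Illustrative sketch: a multishot poll request. Requested events go in
 * sqe->poll32_events (note the BE caveat on that field above), and the
 * POLL_ADD command flags go in sqe->len as described; 'sockfd' is
 * application-provided.
 *
 *      sqe->opcode        = IORING_OP_POLL_ADD;
 *      sqe->fd            = sockfd;
 *      sqe->poll32_events = POLLIN;
 *      sqe->len           = IORING_POLL_ADD_MULTI;
 */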

/*
 * ASYNC_CANCEL flags.
 *
 * IORING_ASYNC_CANCEL_ALL  Cancel all requests that match the given key
 * IORING_ASYNC_CANCEL_FD   Key off 'fd' for cancelation rather than the
 *              request 'user_data'
 * IORING_ASYNC_CANCEL_ANY  Match any request
 * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
 */
#define IORING_ASYNC_CANCEL_ALL (1U << 0)
#define IORING_ASYNC_CANCEL_FD  (1U << 1)
#define IORING_ASYNC_CANCEL_ANY (1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED    (1U << 3)
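
/*
 * Illustrative sketch: canceling every pending request that targets a given
 * file descriptor instead of matching on user_data; 'target_fd' is
 * application-provided.
 *
 *      sqe->opcode       = IORING_OP_ASYNC_CANCEL;
 *      sqe->fd           = target_fd;
 *      sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;
 *
 * Without IORING_ASYNC_CANCEL_FD, sqe->addr carries the user_data value of
 * the request to cancel instead.
 */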

/*
 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
 *
 * IORING_RECVSEND_POLL_FIRST   If set, instead of first attempting to send
 *              or receive and arm poll if that yields an
 *              -EAGAIN result, arm poll upfront and skip
 *              the initial transfer attempt.
 *
 * IORING_RECV_MULTISHOT    Multishot recv. Sets IORING_CQE_F_MORE if
 *              the handler will continue to report
 *              CQEs on behalf of the same SQE.
 *
 * IORING_RECVSEND_FIXED_BUF    Use registered buffers, the index is stored in
 *              the buf_index field.
 */
#define IORING_RECVSEND_POLL_FIRST  (1U << 0)
#define IORING_RECV_MULTISHOT       (1U << 1)
#define IORING_RECVSEND_FIXED_BUF   (1U << 2)
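
/*
 * Illustrative sketch: a multishot receive combined with provided buffers,
 * since each multishot completion consumes a buffer from the selected
 * group; 'sockfd' and 'bgid' are application-provided.
 *
 *      sqe->opcode    = IORING_OP_RECV;
 *      sqe->fd        = sockfd;
 *      sqe->ioprio    = IORING_RECV_MULTISHOT;
 *      sqe->flags    |= IOSQE_BUFFER_SELECT;
 *      sqe->buf_group = bgid;
 */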

/*
 * accept flags stored in sqe->ioprio
 */
#define IORING_ACCEPT_MULTISHOT (1U << 0)

/*
 * IORING_OP_MSG_RING command types, stored in sqe->addr
 */
enum {
    IORING_MSG_DATA,    /* pass sqe->len as 'res' and off as user_data */
    IORING_MSG_SEND_FD, /* send a registered fd to another ring */
};

/*
 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
 *
 * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
 *              applicable for IORING_MSG_DATA, obviously.
 */
#define IORING_MSG_RING_CQE_SKIP    (1U << 0)
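
/*
 * Illustrative sketch: posting a data-only CQE into another ring with
 * IORING_MSG_DATA; 'other_ring_fd' is the fd of the target ring.
 *
 *      sqe->opcode = IORING_OP_MSG_RING;
 *      sqe->fd     = other_ring_fd;
 *      sqe->addr   = IORING_MSG_DATA;
 *      sqe->len    = 0x42;
 *      sqe->off    = 0xfeed;
 *
 * On the target ring this shows up as a CQE with res == 0x42 and
 * user_data == 0xfeed.
 */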

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
    __u64   user_data;  /* sqe->data submission passed back */
    __s32   res;        /* result code for this event */
    __u32   flags;

    /*
     * If the ring is initialized with IORING_SETUP_CQE32, then this field
     * contains 16-bytes of padding, doubling the size of the CQE.
     */
    __u64 big_cqe[];
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER  If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE    If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY   If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF   Set for notification CQEs. Can be used to distinguish
 *          them from sends.
 */
#define IORING_CQE_F_BUFFER     (1U << 0)
#define IORING_CQE_F_MORE       (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY  (1U << 2)
#define IORING_CQE_F_NOTIF      (1U << 3)

enum {
    IORING_CQE_BUFFER_SHIFT     = 16,
};
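
/*
 * Illustrative sketch: decoding a completion. 'cqe' is assumed to point at
 * the next unconsumed entry of the mapped CQ ring.
 *
 *      if (cqe->res < 0)
 *              handle_error(-cqe->res);
 *      if (cqe->flags & IORING_CQE_F_BUFFER)
 *              bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *      if (!(cqe->flags & IORING_CQE_F_MORE))
 *              rearm_multishot_request();
 *
 * 'handle_error' and 'rearm_multishot_request' are hypothetical application
 * helpers; cqe->res carries a negated errno on failure.
 */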

/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING      0ULL
#define IORING_OFF_CQ_RING      0x8000000ULL
#define IORING_OFF_SQES         0x10000000ULL

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
    __u32 head;
    __u32 tail;
    __u32 ring_mask;
    __u32 ring_entries;
    __u32 flags;
    __u32 dropped;
    __u32 array;
    __u32 resv1;
    __u64 resv2;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP   (1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW   (1U << 1) /* CQ ring is overflown */
#define IORING_SQ_TASKRUN   (1U << 2) /* task should enter the kernel */

struct io_cqring_offsets {
    __u32 head;
    __u32 tail;
    __u32 ring_mask;
    __u32 ring_entries;
    __u32 overflow;
    __u32 cqes;
    __u32 flags;
    __u32 resv1;
    __u64 resv2;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED  (1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS      (1U << 0)
#define IORING_ENTER_SQ_WAKEUP      (1U << 1)
#define IORING_ENTER_SQ_WAIT        (1U << 2)
#define IORING_ENTER_EXT_ARG        (1U << 3)
#define IORING_ENTER_REGISTERED_RING    (1U << 4)
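
/*
 * Illustrative sketch: submitting and waiting in one io_uring_enter(2)
 * call, invoked here via the raw syscall(2) interface (__NR_io_uring_enter
 * from <sys/syscall.h>); 'ring_fd' is the fd returned by io_uring_setup(2)
 * and 'to_submit' the number of new SQEs made visible in the SQ ring.
 *
 *      syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *              IORING_ENTER_GETEVENTS, NULL, 0);
 */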

/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
    __u32 sq_entries;
    __u32 cq_entries;
    __u32 flags;
    __u32 sq_thread_cpu;
    __u32 sq_thread_idle;
    __u32 features;
    __u32 wq_fd;
    __u32 resv[3];
    struct io_sqring_offsets sq_off;
    struct io_cqring_offsets cq_off;
};
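
/*
 * Illustrative sketch: creating a ring and mmap(2)-ing it, combining the
 * magic offsets above with the sq_off/cq_off values the kernel fills in.
 * Error handling is omitted and <sys/mman.h>, <sys/syscall.h> and
 * <unistd.h> are assumed; the size calculations follow io_uring(7).
 *
 *      struct io_uring_params p = { 0 };
 *      int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 *      size_t sq_sz  = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *      size_t cq_sz  = p.cq_off.cqes  + p.cq_entries * sizeof(struct io_uring_cqe);
 *      size_t sqe_sz = p.sq_entries * sizeof(struct io_uring_sqe);
 *
 *      void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *                           MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *      void *cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *                           MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
 *      void *sqes    = mmap(NULL, sqe_sz, PROT_READ | PROT_WRITE,
 *                           MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * If IORING_FEAT_SINGLE_MMAP is set in p.features, the SQ and CQ rings can
 * instead be covered by a single mapping of max(sq_sz, cq_sz) at
 * IORING_OFF_SQ_RING.
 */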

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP     (1U << 0)
#define IORING_FEAT_NODROP      (1U << 1)
#define IORING_FEAT_SUBMIT_STABLE   (1U << 2)
#define IORING_FEAT_RW_CUR_POS      (1U << 3)
#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
#define IORING_FEAT_FAST_POLL       (1U << 5)
#define IORING_FEAT_POLL_32BITS     (1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
#define IORING_FEAT_EXT_ARG     (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS  (1U << 9)
#define IORING_FEAT_RSRC_TAGS       (1U << 10)
#define IORING_FEAT_CQE_SKIP        (1U << 11)
#define IORING_FEAT_LINKED_FILE     (1U << 12)

/*
 * io_uring_register(2) opcodes and arguments
 */
enum {
    IORING_REGISTER_BUFFERS         = 0,
    IORING_UNREGISTER_BUFFERS       = 1,
    IORING_REGISTER_FILES           = 2,
    IORING_UNREGISTER_FILES         = 3,
    IORING_REGISTER_EVENTFD         = 4,
    IORING_UNREGISTER_EVENTFD       = 5,
    IORING_REGISTER_FILES_UPDATE        = 6,
    IORING_REGISTER_EVENTFD_ASYNC       = 7,
    IORING_REGISTER_PROBE           = 8,
    IORING_REGISTER_PERSONALITY     = 9,
    IORING_UNREGISTER_PERSONALITY       = 10,
    IORING_REGISTER_RESTRICTIONS        = 11,
    IORING_REGISTER_ENABLE_RINGS        = 12,

    /* extended with tagging */
    IORING_REGISTER_FILES2          = 13,
    IORING_REGISTER_FILES_UPDATE2       = 14,
    IORING_REGISTER_BUFFERS2        = 15,
    IORING_REGISTER_BUFFERS_UPDATE      = 16,

    /* set/clear io-wq thread affinities */
    IORING_REGISTER_IOWQ_AFF        = 17,
    IORING_UNREGISTER_IOWQ_AFF      = 18,

    /* set/get max number of io-wq workers */
    IORING_REGISTER_IOWQ_MAX_WORKERS    = 19,

    /* register/unregister io_uring fd with the ring */
    IORING_REGISTER_RING_FDS        = 20,
    IORING_UNREGISTER_RING_FDS      = 21,

    /* register ring based provide buffer group */
    IORING_REGISTER_PBUF_RING       = 22,
    IORING_UNREGISTER_PBUF_RING     = 23,

    /* sync cancelation API */
    IORING_REGISTER_SYNC_CANCEL     = 24,

    /* register a range of fixed file slots for automatic slot allocation */
    IORING_REGISTER_FILE_ALLOC_RANGE    = 25,

    /* this goes last */
    IORING_REGISTER_LAST
};
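
/*
 * Illustrative sketch: registering a fixed buffer table so that
 * IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED (and IORING_RECVSEND_FIXED_BUF)
 * can reference buffers by index. 'iov' is an application-provided array of
 * 'nr_iov' struct iovec entries.
 *
 *      syscall(__NR_io_uring_register, ring_fd,
 *              IORING_REGISTER_BUFFERS, iov, nr_iov);
 *
 * Unregistering uses IORING_UNREGISTER_BUFFERS with a NULL argument and an
 * nr_args of 0.
 */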

/* io-wq worker categories */
enum {
    IO_WQ_BOUND,
    IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
    __u32 offset;
    __u32 resv;
    __aligned_u64 /* __s32 * */ fds;
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE (1U << 0)

struct io_uring_rsrc_register {
    __u32 nr;
    __u32 flags;
    __u64 resv2;
    __aligned_u64 data;
    __aligned_u64 tags;
};

struct io_uring_rsrc_update {
    __u32 offset;
    __u32 resv;
    __aligned_u64 data;
};

struct io_uring_rsrc_update2 {
    __u32 offset;
    __u32 resv;
    __aligned_u64 data;
    __aligned_u64 tags;
    __u32 nr;
    __u32 resv2;
};

struct io_uring_notification_slot {
    __u64 tag;
    __u64 resv[3];
};

struct io_uring_notification_register {
    __u32 nr_slots;
    __u32 resv;
    __u64 resv2;
    __u64 data;
    __u64 resv3;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP  (-2)

#define IO_URING_OP_SUPPORTED   (1U << 0)

struct io_uring_probe_op {
    __u8 op;
    __u8 resv;
    __u16 flags;    /* IO_URING_OP_* flags */
    __u32 resv2;
};

struct io_uring_probe {
    __u8 last_op;   /* last opcode supported */
    __u8 ops_len;   /* length of ops[] array below */
    __u16 resv;
    __u32 resv2[3];
    struct io_uring_probe_op ops[];
};
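
/*
 * Illustrative sketch: probing which opcodes the running kernel supports.
 * The probe is sized for 256 ops since the op field is a single byte;
 * calloc() and error handling are assumed.
 *
 *      struct io_uring_probe *probe;
 *
 *      probe = calloc(1, sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op));
 *      syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE, probe, 256);
 *
 *      if (IORING_OP_SEND_ZC <= probe->last_op &&
 *          (probe->ops[IORING_OP_SEND_ZC].flags & IO_URING_OP_SUPPORTED))
 *              use_zero_copy_send();
 *
 * 'use_zero_copy_send' is a hypothetical application helper.
 */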

struct io_uring_restriction {
    __u16 opcode;
    union {
        __u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
        __u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
        __u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
    };
    __u8 resv;
    __u32 resv2[3];
};

struct io_uring_buf {
    __u64   addr;
    __u32   len;
    __u16   bid;
    __u16   resv;
};

struct io_uring_buf_ring {
    union {
        /*
         * To avoid spilling into more pages than we need to, the
         * ring tail is overlaid with the io_uring_buf->resv field.
         */
        struct {
            __u64   resv1;
            __u32   resv2;
            __u16   resv3;
            __u16   tail;
        };
        struct io_uring_buf bufs[0];
    };
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
    __u64   ring_addr;
    __u32   ring_entries;
    __u16   bgid;
    __u16   pad;
    __u64   resv[3];
};
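
/*
 * Illustrative sketch: registering a ring-mapped provided buffer group.
 * 'ring_entries' must be a power of two; the memory is allocated by the
 * application (an anonymous mmap here) and described via io_uring_buf_reg.
 * 'bgid' is the group id later placed in sqe->buf_group.
 *
 *      struct io_uring_buf_ring *br;
 *      struct io_uring_buf_reg reg = { 0 };
 *
 *      br = mmap(NULL, ring_entries * sizeof(struct io_uring_buf),
 *                PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 *      reg.ring_addr    = (unsigned long) br;
 *      reg.ring_entries = ring_entries;
 *      reg.bgid         = bgid;
 *      syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 *
 * Buffers are then published by filling br->bufs[] entries and advancing
 * br->tail (with a release barrier) so the kernel can consume them.
 */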

/*
 * io_uring_restriction->opcode values
 */
enum {
    /* Allow an io_uring_register(2) opcode */
    IORING_RESTRICTION_REGISTER_OP      = 0,

    /* Allow an sqe opcode */
    IORING_RESTRICTION_SQE_OP       = 1,

    /* Allow sqe flags */
    IORING_RESTRICTION_SQE_FLAGS_ALLOWED    = 2,

    /* Require sqe flags (these flags must be set on each submission) */
    IORING_RESTRICTION_SQE_FLAGS_REQUIRED   = 3,

    IORING_RESTRICTION_LAST
};

struct io_uring_getevents_arg {
    __u64   sigmask;
    __u32   sigmask_sz;
    __u32   pad;
    __u64   ts;
};
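
/*
 * Illustrative sketch: waiting with a timeout via the extended argument.
 * Both sigmask and ts are pointers carried in __u64 fields; a zero sigmask
 * leaves the signal mask untouched.
 *
 *      struct __kernel_timespec ts = { .tv_sec = 1 };
 *      struct io_uring_getevents_arg arg = {
 *              .ts = (unsigned long) &ts,
 *      };
 *
 *      syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *              IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *              &arg, sizeof(arg));
 */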

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
    __u64               addr;
    __s32               fd;
    __u32               flags;
    struct __kernel_timespec    timeout;
    __u64               pad[4];
};
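
/*
 * Illustrative sketch: synchronously canceling a request by its user_data.
 * Setting both timeout fields to -1 is taken here to mean "no time limit",
 * matching how an unset timeout is treated for this interface.
 *
 *      struct io_uring_sync_cancel_reg reg = {
 *              .addr    = 0xcafe,
 *              .timeout = { .tv_sec = -1, .tv_nsec = -1 },
 *      };
 *
 *      syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_SYNC_CANCEL, &reg, 1);
 */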

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
    __u32   off;
    __u32   len;
    __u64   resv;
};

struct io_uring_recvmsg_out {
    __u32 namelen;
    __u32 controllen;
    __u32 payloadlen;
    __u32 flags;
};

#ifdef __cplusplus
}
#endif

#endif