// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

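/*
 * One group of provided buffers, identified by ->bgid. Depending on how the
 * group was registered, the buffers either live on ->buf_list (classic
 * provided buffers) or in a mapped ring (->buf_pages/->buf_ring).
 */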
struct io_buffer_list {
    /*
     * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
     * then these are classic provided buffers and ->buf_list is used.
     */
    union {
        struct list_head buf_list;
        struct {
            struct page **buf_pages;
            struct io_uring_buf_ring *buf_ring;
        };
    };
    __u16 bgid;

    /* below is for ring provided buffers */
    __u16 buf_nr_pages;
    __u16 nr_entries;
    __u16 head;
    __u16 mask;
};

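/*
 * A single classic provided buffer, linked into its group's ->buf_list:
 * addr/len describe the buffer itself, bid is the buffer ID and bgid the
 * group it belongs to.
 */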
struct io_buffer {
    struct list_head list;
    __u64 addr;
    __u32 len;
    __u16 bid;
    __u16 bgid;
};

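/*
 * Select a provided buffer for the request, returning a user pointer to it;
 * *len may be reduced to the length of the chosen buffer.
 */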
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
                  unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
    /*
     * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
     * the flag and hence ensure that bl->head doesn't get incremented.
     * If the tail has already been incremented, hang on to it.
     * The exception is partial io, in which case we should increment
     * bl->head to monopolize the buffer.
     */
    if (req->buf_list) {
        if (req->flags & REQ_F_PARTIAL_IO) {
            /*
             * If we end up here, then the io_uring_lock has
             * been kept held since we retrieved the buffer.
             * For the io-wq case, we already cleared
             * req->buf_list when the buffer was retrieved,
             * hence it cannot be set here for that case.
             */
            req->buf_list->head++;
            req->buf_list = NULL;
        } else {
            req->buf_index = req->buf_list->bgid;
            req->flags &= ~REQ_F_BUFFER_RING;
        }
    }
}

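/*
 * True if the request asked for buffer selection (REQ_F_BUFFER_SELECT) but
 * no buffer has been picked yet (neither REQ_F_BUFFER_SELECTED nor
 * REQ_F_BUFFER_RING is set).
 */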
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
    if (!(req->flags & REQ_F_BUFFER_SELECT))
        return false;
    return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

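/*
 * Hand an unconsumed selected buffer back to its group so it can be reused,
 * dispatching to the legacy or ring variant depending on how the buffer was
 * selected.
 */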
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
    /*
     * READV uses fields in `struct io_rw` (len/addr) to stash the selected
     * buffer data. However, if that buffer is recycled, the original request
     * data stored in addr is lost. Therefore forbid recycling for now.
     */
    if (req->opcode == IORING_OP_READV) {
        if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
            req->buf_list->head++;
            req->buf_list = NULL;
        }
        return;
    }
    if (req->flags & REQ_F_BUFFER_SELECTED)
        io_kbuf_recycle_legacy(req, issue_flags);
    if (req->flags & REQ_F_BUFFER_RING)
        io_kbuf_recycle_ring(req);
}

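/*
 * Drop the request's buffer and return the CQE flags that report its buffer
 * ID to userspace. Ring provided buffers are consumed by bumping ->head,
 * legacy buffers are moved onto @list.
 */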
static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
                          struct list_head *list)
{
    unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

    if (req->flags & REQ_F_BUFFER_RING) {
        if (req->buf_list) {
            req->buf_index = req->buf_list->bgid;
            req->buf_list->head++;
        }
        req->flags &= ~REQ_F_BUFFER_RING;
    } else {
        req->buf_index = req->kbuf->bgid;
        list_add(&req->kbuf->list, list);
        req->flags &= ~REQ_F_BUFFER_SELECTED;
    }

    return ret;
}

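/*
 * Completion-path variant: called with ->completion_lock held, parks legacy
 * buffers on ctx->io_buffers_comp.
 */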
static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
    lockdep_assert_held(&req->ctx->completion_lock);

    if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
        return 0;
    return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

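/*
 * Variant used where issue_flags are available; returns the CQE buffer
 * flags, or 0 if the request holds no selected buffer.
 */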
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
                       unsigned issue_flags)
{
    if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
        return 0;
    return __io_put_kbuf(req, issue_flags);
}
#endif