/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_QEUEUE_H
#define _FUN_QEUEUE_H

#include <linux/interrupt.h>
#include <linux/io.h>

/* Forward declarations: this header only needs pointers to these types. */
struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;

/* CQ completion handler.  @data is the opaque pointer registered with
 * fun_set_cq_callback(), @msg points at the reaped CQE, and @info at the
 * fun_cqe_info trailer within it (see funq_cqe_info()).
 */
typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
			      const struct fun_cqe_info *info);
0017 
/* Software bookkeeping for one RQ buffer: the backing page and its DMA
 * mapping.
 */
struct fun_rq_info {
	dma_addr_t dma;
	struct page *page;
};
0022 
/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
	struct fun_dev *fdev;		/* owning device */
	spinlock_t sq_lock;		/* NOTE(review): presumably serializes SQ
					 * submission -- confirm against users */

	/* DMA addresses of the ring memory given to the device */
	dma_addr_t cq_dma_addr;
	dma_addr_t sq_dma_addr;
	dma_addr_t rq_dma_addr;

	/* doorbell registers, one per ring */
	u32 __iomem *cq_db;
	u32 __iomem *sq_db;
	u32 __iomem *rq_db;

	/* CPU addresses of the ring entries */
	void *cqes;
	void *sq_cmds;
	struct fun_eprq_rqbuf *rqes;
	struct fun_rq_info *rq_info;	/* per-RQ-buffer page/DMA state */

	/* queue IDs (returned through *sqidp/*cqidp by the create calls) */
	u32 cqid;
	u32 sqid;
	u32 rqid;

	/* number of entries in each ring */
	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	/* current ring positions; sq_tail wraps at sq_depth
	 * (see funq_sq_post_tail())
	 */
	u16 cq_head;
	u16 sq_tail;
	u16 rq_tail;

	/* log2 of the entry sizes in bytes (see fun_sqe_at()) */
	u8 cqe_size_log2;
	u8 sqe_size_log2;

	/* byte offset of struct fun_cqe_info within a CQE
	 * (see funq_cqe_info())
	 */
	u16 cqe_info_offset;

	/* RQ buffer fill state -- TODO confirm exact semantics in fun_queue.c */
	u16 rq_buf_idx;
	int rq_buf_offset;
	u16 num_rqe_to_fill;

	/* interrupt coalescing parameters, mirrored from the alloc request */
	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;

	/* per-ring creation flags */
	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	/* SQ head writeback */
	u16 sq_comp;

	volatile __be64 *sq_head;	/* device-updated SQ head writeback
					 * location -- NOTE(review): verify */

	cq_callback_t cq_cb;		/* completion handler */
	void *cb_data;			/* opaque argument passed to cq_cb */

	irq_handler_t irq_handler;
	void *irq_data;
	s16 cq_vector;			/* signed: presumably negative when no
					 * vector is assigned -- confirm */
	u8 cq_phase;			/* NOTE(review): looks like the expected
					 * CQE phase/valid bit -- confirm */

	/* I/O q index */
	u16 qid;

	char irqname[24];		/* name used by fun_request_irq() */
};
0089 
0090 static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
0091 {
0092     return funq->sq_cmds + (pos << funq->sqe_size_log2);
0093 }
0094 
0095 static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
0096 {
0097     if (++tail == funq->sq_depth)
0098         tail = 0;
0099     funq->sq_tail = tail;
0100     writel(tail, funq->sq_db);
0101 }
0102 
0103 static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
0104                          void *cqe)
0105 {
0106     return cqe + funq->cqe_info_offset;
0107 }
0108 
/* Write the current RQ tail to the RQ doorbell register. */
static inline void funq_rq_post(struct fun_queue *funq)
{
	writel(funq->rq_tail, funq->rq_db);
}
0113 
/* Parameters describing the queue group to create; consumed by
 * fun_alloc_queue().
 */
struct fun_queue_alloc_req {
	u8  cqe_size_log2;	/* log2 of CQE size in bytes */
	u8  sqe_size_log2;	/* log2 of SQE size in bytes */

	/* per-ring creation flags */
	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	/* ring depths in entries */
	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	/* interrupt coalescing parameters */
	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;
};
0131 
/* Create a device SQ/CQ.  On success the queue ID is returned through
 * *sqidp / *cqidp and the doorbell address through *dbp.
 */
int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id,
		  u32 *cqidp, u32 __iomem **dbp);
/* Allocate/free ring memory: @depth HW descriptors of @hw_desc_sz bytes,
 * optionally a parallel SW descriptor array (returned via @sw_va) and a
 * writeback area (@wb, returned via @wb_va).
 */
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_size, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);

/* Destroy a device SQ/CQ by ID via fun_res_destroy(). */
#define fun_destroy_sq(fdev, sqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))

/* Allocate/free the host-side state of a queue group. */
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);
0157 
0158 static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
0159                        void *cb_data)
0160 {
0161     funq->cq_cb = cb;
0162     funq->cb_data = cb_data;
0163 }
0164 
/* Create the RQ / the full queue group described by @funq on the device. */
int fun_create_rq(struct fun_queue *funq);
int fun_create_queue(struct fun_queue *funq);

/* Request/release the queue's interrupt (name stored in funq->irqname). */
void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data);

/* Process up to @max CQ entries.  NOTE(review): the __ variant presumably
 * omits the doorbell update -- confirm against the implementation.
 */
unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);

#endif /* _FUN_QEUEUE_H */