#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include <linux/netdevice.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
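
/* Worked example of the block sizing above (illustrative): a 4096-entry
 * ring uses 64-entry buf blocks, so VNIC_RQ_BUF_BLKS_NEEDED(4096) =
 * DIV_ROUND_UP(4096, 64) = 64 blocks, which is also VNIC_RQ_BUF_BLKS_MAX.
 * A 32-entry ring (< 64) falls back to the 32-entry minimum block size and
 * needs just DIV_ROUND_UP(32, 32) = 1 block.
 */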

struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
	uint64_t wr_id;
};

enum enic_poll_state {
	ENIC_POLL_STATE_IDLE,
	ENIC_POLL_STATE_NAPI,
	ENIC_POLL_STATE_POLL
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len,
	uint64_t wrid)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
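
/* Illustrative sketch of how a caller might use vnic_rq_post(): take the
 * next descriptor slot, describe the buffer to hardware, then post. The
 * descriptor-encoding helper example_write_desc() and the pre-mapped
 * skb/dma_addr/len arguments are hypothetical stand-ins for the driver's
 * real helpers, not part of this API.
 */
#if 0
static void example_post_one(struct vnic_rq *rq, struct sk_buff *skb,
	dma_addr_t dma_addr, unsigned int len)
{
	void *desc = vnic_rq_next_desc(rq);	/* HW descriptor to fill */

	example_write_desc(desc, dma_addr, len);	/* hypothetical */
	vnic_rq_post(rq, skb, 0, dma_addr, len, 0);	/* hand slot to HW */
}
#endif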

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {
		/* Walk from to_clean up to completed_index; entries before
		 * the completing descriptor are reported to buf_service
		 * with skipped set.
		 */
		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}
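
/* Illustrative sketch of a buf_service callback for vnic_rq_service():
 * unmap the completed buffer and hand it to the receive path, or free it
 * when it was reported as skipped. example_unmap(), example_drop() and
 * example_rx() are hypothetical stand-ins for the driver's real DMA and
 * RX helpers.
 */
#if 0
static void example_rq_buf_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	example_unmap(rq->vdev, buf->dma_addr, buf->len);	/* hypothetical */

	if (skipped)
		example_drop(buf->os_buf);	/* hypothetical */
	else
		example_rx(opaque, cq_desc, buf->os_buf);	/* hypothetical */
}
#endif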

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
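
/* Illustrative sketch of a buf_fill callback for vnic_rq_fill(): allocate
 * and DMA-map one OS buffer, then post it with vnic_rq_post(). The fixed
 * buffer length and the mapping helper example_map_skb() are hypothetical
 * (a real driver sizes buffers from the MTU and checks for mapping
 * errors), and this assumes the vdev private pointer is the netdev.
 */
#if 0
static int example_rq_alloc_buf(struct vnic_rq *rq)
{
	struct net_device *netdev = vnic_dev_priv(rq->vdev);	/* assumption */
	unsigned int len = 1514;	/* hypothetical buffer size */
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = example_map_skb(rq->vdev, skb, len);	/* hypothetical */
	vnic_rq_post(rq, skb, 0, dma_addr, len, 0);

	return 0;
}
#endif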

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
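
/* Typical queue lifecycle with the entry points above (a sketch of how
 * callers are commonly structured; error handling elided):
 *
 *	vnic_rq_alloc(vdev, rq, i, desc_count, desc_size);
 *	vnic_rq_init(rq, cq_index, err_intr_enable, err_intr_offset);
 *	vnic_rq_fill(rq, my_buf_fill);
 *	vnic_rq_enable(rq);
 *	...
 *	vnic_rq_disable(rq);
 *	vnic_rq_clean(rq, my_buf_clean);
 *	vnic_rq_free(rq);
 *
 * my_buf_fill/my_buf_clean are the driver's own callbacks, named here only
 * for illustration.
 */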

#endif /* _VNIC_RQ_H_ */