#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"

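/*
 * The defines below rename the shared vnic_rq symbols to fnic-specific
 * names so that this code does not clash with other Cisco vNIC drivers
 * (e.g. enic) that carry their own copy of the same symbols.
 */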
#define vnic_rq_desc_avail fnic_rq_desc_avail
#define vnic_rq_desc_used fnic_rq_desc_used
#define vnic_rq_next_desc fnic_rq_next_desc
#define vnic_rq_next_index fnic_rq_next_index
#define vnic_rq_next_buf_index fnic_rq_next_buf_index
#define vnic_rq_post fnic_rq_post
#define vnic_rq_posting_soon fnic_rq_posting_soon
#define vnic_rq_return_descs fnic_rq_return_descs
#define vnic_rq_service fnic_rq_service
#define vnic_rq_fill fnic_rq_fill
#define vnic_rq_free fnic_rq_free
#define vnic_rq_alloc fnic_rq_alloc
#define vnic_rq_init fnic_rq_init
#define vnic_rq_error_status fnic_rq_error_status
#define vnic_rq_enable fnic_rq_enable
#define vnic_rq_disable fnic_rq_disable
#define vnic_rq_clean fnic_rq_clean

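/* Receive queue control (hardware register layout) */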
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};

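/* Per-buffer bookkeeping is allocated in blocks of 64 entries */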
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)

struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;	/* next buffer to post to HW */
	struct vnic_rq_buf *to_clean;	/* next posted buffer to service */
	void *os_buf_head;
	unsigned int buf_index;
	unsigned int pkts_outstanding;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
	return rq->buf_index++;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
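
/*
 * Illustrative sketch (compiled out): a typical buf_fill callback, as
 * passed to vnic_rq_fill() below, allocates and DMA-maps a buffer,
 * writes the device-specific RQ descriptor, and hands the buffer over
 * with vnic_rq_post().  All example_* names are hypothetical; real
 * drivers encode the descriptor per the hardware spec.
 */
#if 0
static int example_rq_fill(struct vnic_rq *rq)
{
	struct device *dmadev = example_dma_dev(rq->vdev);	/* hypothetical */
	unsigned int len = ETH_FRAME_LEN;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = dev_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	dma_addr = dma_map_single(dmadev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, dma_addr)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	/* write the descriptor SW owns, then post the buffer to HW */
	example_write_rq_desc(vnic_rq_next_desc(rq), dma_addr, len);
	vnic_rq_post(rq, skb, 0, dma_addr, len);

	return 0;
}
#endif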

static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
	/* true when to_use sits on a posted_index update boundary */
	return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
}

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

/* Tells vnic_rq_service() whether to return descriptors to the ring
 * immediately or leave that to a later vnic_rq_return_descs() call.
 */
enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {
		/* every buffer up to completed_index is consumed; all but
		 * the completing one are flagged as skipped
		 */
		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}
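
/*
 * Illustrative sketch (compiled out): shape of a buf_service callback
 * for vnic_rq_service().  It unmaps the buffer and either drops it
 * (when flagged as skipped) or passes the frame up.  All example_*
 * names are hypothetical.
 */
#if 0
static void example_rq_buf_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct device *dmadev = example_dma_dev(rq->vdev);	/* hypothetical */
	struct sk_buff *skb = buf->os_buf;

	dma_unmap_single(dmadev, buf->dma_addr, buf->len, DMA_FROM_DEVICE);

	if (skipped) {
		dev_kfree_skb_any(skb);
		return;
	}

	example_deliver_frame(cq_desc, skb);	/* hypothetical */
}

/*
 * From the CQ completion path, once completed_index has been decoded
 * from the CQ descriptor:
 *
 *	vnic_rq_service(rq, cq_desc, completed_index,
 *			VNIC_RQ_RETURN_DESC, example_rq_buf_service, NULL);
 */
#endif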

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	/* always leave one descriptor unposted: a completely full ring
	 * would be indistinguishable from an empty one (see
	 * vnic_rq_desc_used())
	 */
	while (vnic_rq_desc_avail(rq) > 1) {
		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
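
/*
 * Typical use (sketch): call vnic_rq_fill(rq, example_rq_fill) once
 * after vnic_rq_init() to prime the ring, and again from the receive
 * path as buffers are consumed.  A non-zero return means the ring was
 * only partially filled.
 */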

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
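
/*
 * Illustrative sketch (compiled out): typical RQ bring-up and teardown
 * order using the entry points above.  The descriptor count and size
 * and all example_* names are hypothetical; error handling is
 * abbreviated.
 */
#if 0
static int example_rq_start(struct vnic_dev *vdev, struct vnic_rq *rq)
{
	int err;

	err = vnic_rq_alloc(vdev, rq, 0 /* index */, 256 /* descs */,
			    64 /* desc size, device-specific */);
	if (err)
		return err;

	vnic_rq_init(rq, 0 /* cq_index */,
		     1 /* error_interrupt_enable */,
		     0 /* error_interrupt_offset */);

	err = vnic_rq_fill(rq, example_rq_fill);
	if (err)
		goto err_out;

	vnic_rq_enable(rq);
	return 0;

err_out:
	vnic_rq_clean(rq, example_rq_buf_clean);	/* hypothetical */
	vnic_rq_free(rq);
	return err;
}

static void example_rq_stop(struct vnic_rq *rq)
{
	/* quiesce HW first; disable can fail if the device is wedged */
	if (vnic_rq_disable(rq))
		return;

	vnic_rq_clean(rq, example_rq_buf_clean);
	vnic_rq_free(rq);
}
#endif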

#endif /* _VNIC_RQ_H_ */