#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include "vnic_dev.h"
#include "vnic_wq.h"
#include "enic.h"

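/* Allocate the per-descriptor buf tracking structures in blocks and link
 * them into a single circular list that mirrors the descriptor ring: the
 * last entry of each block points at the first entry of the next block,
 * and the final entry wraps back to wq->bufs[0].
 */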
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
		if (!wq->bufs[i])
			return -ENOMEM;
	}

	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = wq->bufs[0];
				buf->next->prev = buf;
				break;
			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
				buf->next = wq->bufs[i + 1];
				buf->next->prev = buf;
			} else {
				buf->next = buf + 1;
				buf->next->prev = buf;
				buf++;
			}
		}
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}

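/* Free the descriptor ring and every allocated buf tracking block,
 * then drop the reference to the WQ control registers.
 */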
void vnic_wq_free(struct vnic_wq *wq)
{
	struct vnic_dev *vdev;
	unsigned int i;

	vdev = wq->vdev;

	vnic_dev_free_desc_ring(vdev, &wq->ring);

	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
		if (wq->bufs[i]) {
			kfree(wq->bufs[i]);
			wq->bufs[i] = NULL;
		}
	}

	wq->ctrl = NULL;
}

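/* Allocate a work queue: map its control registers, make sure the queue
 * is disabled in hardware, then allocate the descriptor ring and the buf
 * tracking structures. If buf allocation fails, vnic_wq_free() releases
 * the ring again.
 */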
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = index;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
	if (!wq->ctrl) {
		vdev_err(vdev, "Failed to hook WQ[%d] resource\n", index);
		return -EINVAL;
	}

	vnic_wq_disable(wq);

	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_wq_alloc_bufs(wq);
	if (err) {
		vnic_wq_free(wq);
		return err;
	}

	return 0;
}

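/* Allocate the devcmd2 work queue. It is backed by the DEVCMD2 resource
 * rather than a regular WQ resource and does not use buf tracking.
 */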
int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
			  unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = 0;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!wq->ctrl)
		return -EINVAL;
	vnic_wq_disable(wq);
	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);

	return err;
}

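/* Program the WQ control registers (ring base and size, fetch/posted
 * indexes, completion queue binding and error interrupt settings) and
 * position the software to_use/to_clean pointers at fetch_index.
 */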
void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
			unsigned int fetch_index, unsigned int posted_index,
			unsigned int error_interrupt_enable,
			unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = wq->ring.desc_count;

	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(count, &wq->ctrl->ring_size);
	iowrite32(fetch_index, &wq->ctrl->fetch_index);
	iowrite32(posted_index, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);

	wq->to_use = wq->to_clean =
		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}

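/* Initialize a WQ with both fetch and posted indexes starting at zero. */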
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
		  unsigned int error_interrupt_enable,
		  unsigned int error_interrupt_offset)
{
	enic_wq_init_start(wq, cq_index, 0, 0,
			   error_interrupt_enable,
			   error_interrupt_offset);
}

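/* Read the current error status reported by hardware for this WQ. */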
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}

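/* Tell hardware to start servicing this WQ. */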
void vnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}

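/* Tell hardware to stop servicing this WQ and poll the running bit,
 * giving up after roughly 10 ms (1000 polls, 10 us apart).
 */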
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int wait;
	struct vnic_dev *vdev = wq->vdev;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 1000; wait++) {
		if (!(ioread32(&wq->ctrl->running)))
			return 0;
		udelay(10);
	}

	vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}

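/* Hand every outstanding buffer back through buf_clean(), reset the
 * software ring state and clear the hardware indexes, error status and
 * descriptor memory.
 */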
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {

		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}