Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
0004  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
0005  */
0006 
0007 #include <linux/errno.h>
0008 #include <linux/types.h>
0009 #include <linux/pci.h>
0010 #include <linux/delay.h>
0011 #include <linux/slab.h>
0012 #include "vnic_dev.h"
0013 #include "vnic_wq.h"
0014 
0015 
0016 static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
0017         unsigned int index, enum vnic_res_type res_type)
0018 {
0019     wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
0020 
0021     if (!wq->ctrl)
0022         return -EINVAL;
0023 
0024     return 0;
0025 }
0026 
0027 
0028 static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
0029         unsigned int desc_count, unsigned int desc_size)
0030 {
0031     return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
0032 }
0033 
0034 
0035 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
0036 {
0037     struct vnic_wq_buf *buf;
0038     unsigned int i, j, count = wq->ring.desc_count;
0039     unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
0040 
0041     for (i = 0; i < blks; i++) {
0042         wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
0043         if (!wq->bufs[i]) {
0044             printk(KERN_ERR "Failed to alloc wq_bufs\n");
0045             return -ENOMEM;
0046         }
0047     }
0048 
0049     for (i = 0; i < blks; i++) {
0050         buf = wq->bufs[i];
0051         for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
0052             buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
0053             buf->desc = (u8 *)wq->ring.descs +
0054                 wq->ring.desc_size * buf->index;
0055             if (buf->index + 1 == count) {
0056                 buf->next = wq->bufs[0];
0057                 break;
0058             } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
0059                 buf->next = wq->bufs[i + 1];
0060             } else {
0061                 buf->next = buf + 1;
0062                 buf++;
0063             }
0064         }
0065     }
0066 
0067     wq->to_use = wq->to_clean = wq->bufs[0];
0068 
0069     return 0;
0070 }
0071 
0072 void vnic_wq_free(struct vnic_wq *wq)
0073 {
0074     struct vnic_dev *vdev;
0075     unsigned int i;
0076 
0077     vdev = wq->vdev;
0078 
0079     vnic_dev_free_desc_ring(vdev, &wq->ring);
0080 
0081     for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
0082         kfree(wq->bufs[i]);
0083         wq->bufs[i] = NULL;
0084     }
0085 
0086     wq->ctrl = NULL;
0087 
0088 }
0089 
0090 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
0091     unsigned int desc_count, unsigned int desc_size)
0092 {
0093     int err;
0094 
0095     wq->index = index;
0096     wq->vdev = vdev;
0097 
0098     wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
0099     if (!wq->ctrl) {
0100         printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
0101         return -EINVAL;
0102     }
0103 
0104     vnic_wq_disable(wq);
0105 
0106     err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
0107     if (err)
0108         return err;
0109 
0110     err = vnic_wq_alloc_bufs(wq);
0111     if (err) {
0112         vnic_wq_free(wq);
0113         return err;
0114     }
0115 
0116     return 0;
0117 }
0118 
0119 
0120 int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
0121         unsigned int desc_count, unsigned int desc_size)
0122 {
0123     int err;
0124 
0125     wq->index = 0;
0126     wq->vdev = vdev;
0127 
0128     err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
0129     if (err) {
0130         pr_err("Failed to get devcmd2 resource\n");
0131         return err;
0132     }
0133     vnic_wq_disable(wq);
0134 
0135     err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
0136     if (err)
0137         return err;
0138     return 0;
0139 }
0140 
/*
 * Program the hardware registers of @wq and position the software
 * producer/consumer pointers at @fetch_index.
 *
 * NOTE(review): the register write order below is kept exactly as-is;
 * assumed to matter to the hardware — do not reorder.  Callers must
 * have allocated wq->bufs first (vnic_wq_alloc_bufs()) if fetch_index
 * indexing into them is to be meaningful.
 */
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
        unsigned int fetch_index, unsigned int posted_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
{
    u64 paddr;
    unsigned int count = wq->ring.desc_count;

    /* tag the ring base address so the device targets it correctly */
    paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
    writeq(paddr, &wq->ctrl->ring_base);
    iowrite32(count, &wq->ctrl->ring_size);
    iowrite32(fetch_index, &wq->ctrl->fetch_index);
    iowrite32(posted_index, &wq->ctrl->posted_index);
    iowrite32(cq_index, &wq->ctrl->cq_index);
    iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
    iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
    iowrite32(0, &wq->ctrl->error_status);

    /* locate the buf entry for fetch_index: block, then slot in block */
    wq->to_use = wq->to_clean =
        &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
        [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
}
0163 
0164 
/*
 * Program the hardware registers of @wq with fetch and posted indexes
 * reset to zero.  Unlike vnic_wq_init_start(), this does not touch the
 * software to_use/to_clean pointers, so it is safe for queues without
 * buffer bookkeeping blocks (e.g. the devcmd2 queue).
 *
 * NOTE(review): register write order kept exactly as-is; assumed to
 * matter to the hardware — do not reorder.
 */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset)
{
    u64 paddr;

    /* tag the ring base address so the device targets it correctly */
    paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
    writeq(paddr, &wq->ctrl->ring_base);
    iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
    iowrite32(0, &wq->ctrl->fetch_index);
    iowrite32(0, &wq->ctrl->posted_index);
    iowrite32(cq_index, &wq->ctrl->cq_index);
    iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
    iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
    iowrite32(0, &wq->ctrl->error_status);
}
0181 
0182 unsigned int vnic_wq_error_status(struct vnic_wq *wq)
0183 {
0184     return ioread32(&wq->ctrl->error_status);
0185 }
0186 
0187 void vnic_wq_enable(struct vnic_wq *wq)
0188 {
0189     iowrite32(1, &wq->ctrl->enable);
0190 }
0191 
0192 int vnic_wq_disable(struct vnic_wq *wq)
0193 {
0194     unsigned int wait;
0195 
0196     iowrite32(0, &wq->ctrl->enable);
0197 
0198     /* Wait for HW to ACK disable request */
0199     for (wait = 0; wait < 100; wait++) {
0200         if (!(ioread32(&wq->ctrl->running)))
0201             return 0;
0202         udelay(1);
0203     }
0204 
0205     printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
0206 
0207     return -ETIMEDOUT;
0208 }
0209 
/*
 * Drain all in-flight buffers of a *disabled* work queue, invoking
 * @buf_clean on each (callback frees/unmaps whatever the buffer
 * carries), then reset software state, the index/status registers
 * and the descriptor ring contents to a pristine state.
 *
 * Precondition: the queue must already be disabled (BUG_ON otherwise).
 */
void vnic_wq_clean(struct vnic_wq *wq,
    void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
    struct vnic_wq_buf *buf;

    /* cleaning a live queue would race with hardware */
    BUG_ON(ioread32(&wq->ctrl->enable));

    buf = wq->to_clean;

    /* walk the circular buf list until every used descriptor is
     * handed to the callback and returned to the available pool
     */
    while (vnic_wq_desc_used(wq) > 0) {

        (*buf_clean)(wq, buf);

        buf = wq->to_clean = buf->next;
        wq->ring.desc_avail++;
    }

    /* producer and consumer back to entry 0 */
    wq->to_use = wq->to_clean = wq->bufs[0];

    iowrite32(0, &wq->ctrl->fetch_index);
    iowrite32(0, &wq->ctrl->posted_index);
    iowrite32(0, &wq->ctrl->error_status);

    vnic_dev_clear_desc_ring(&wq->ring);
}