/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2014 Cisco Systems, Inc.  All rights reserved. */

#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_

#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"

/* Work queue control */
struct vnic_wq_ctrl {
    u64 ring_base;          /* 0x00 */
    u32 ring_size;          /* 0x08 */
    u32 pad0;
    u32 posted_index;       /* 0x10 */
    u32 pad1;
    u32 cq_index;           /* 0x18 */
    u32 pad2;
    u32 enable;             /* 0x20 */
    u32 pad3;
    u32 running;            /* 0x28 */
    u32 pad4;
    u32 fetch_index;        /* 0x30 */
    u32 pad5;
    u32 dca_value;          /* 0x38 */
    u32 pad6;
    u32 error_interrupt_enable; /* 0x40 */
    u32 pad7;
    u32 error_interrupt_offset; /* 0x48 */
    u32 pad8;
    u32 error_status;       /* 0x50 */
    u32 pad9;
};
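
/*
 * Each register above occupies an 8-byte slot (the pad fields keep the
 * 32-bit registers on that stride), and the structure is overlaid on
 * device BAR space, so fields must go through MMIO accessors rather
 * than plain loads and stores. A minimal sketch, assuming "wq" is an
 * initialized struct vnic_wq (defined below):
 *
 *     iowrite32(1, &wq->ctrl->enable);
 *     if (!ioread32(&wq->ctrl->running))
 *         ...queue failed to start, handle the error...
 */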

struct vnic_wq_buf {
    struct vnic_wq_buf *next;   /* circular list through all blocks */
    dma_addr_t dma_addr;        /* DMA address of the posted fragment */
    void *os_buf;               /* OS-level buffer; set only on eop */
    unsigned int len;
    unsigned int index;         /* descriptor index in the ring */
    int sop;                    /* start-of-packet flag */
    void *desc;                 /* backing descriptor in the ring */
};

/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
    ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
        VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_WQ_BUF_BLK_SZ \
    (VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
    DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
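
/*
 * Worked example: VNIC_WQ_BUF_BLK_ENTRIES(24) selects the small
 * 32-entry block, while a full 4096-descriptor ring needs
 * DIV_ROUND_UP(4096, 64) = 64 blocks, which is exactly
 * VNIC_WQ_BUF_BLKS_MAX.
 */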

struct vnic_wq {
    unsigned int index;
    struct vnic_dev *vdev;
    struct vnic_wq_ctrl __iomem *ctrl;  /* memory-mapped */
    struct vnic_dev_ring ring;
    struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
    struct vnic_wq_buf *to_use;
    struct vnic_wq_buf *to_clean;
    unsigned int pkts_outstanding;
};
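
/*
 * At init time the bufs[] blocks are linked through their next pointers
 * into one circular list. to_use points at the next descriptor software
 * may fill and to_clean at the oldest one still owned by hardware, so
 * the two cursors chase each other around the ring as descriptors are
 * posted and completed.
 */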

static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
{
    /* how many does SW own? */
    return wq->ring.desc_avail;
}
static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
{
    /* how many does HW own? desc_avail starts at desc_count - 1:
     * one descriptor is always held back so a full ring can be told
     * apart from an empty one, hence the -1 here.
     */
    return wq->ring.desc_count - wq->ring.desc_avail - 1;
}

static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
{
    return wq->to_use->desc;
}

static inline void svnic_wq_post(struct vnic_wq *wq,
    void *os_buf, dma_addr_t dma_addr,
    unsigned int len, int sop, int eop)
{
    struct vnic_wq_buf *buf = wq->to_use;

    buf->sop = sop;
    buf->os_buf = eop ? os_buf : NULL;
    buf->dma_addr = dma_addr;
    buf->len = len;

    buf = buf->next;
    if (eop) {
        /* Adding write memory barrier prevents compiler and/or CPU
         * reordering, thus avoiding descriptor posting before
         * descriptor is initialized. Otherwise, hardware can read
         * stale descriptor fields.
         */
        wmb();
        iowrite32(buf->index, &wq->ctrl->posted_index);
    }
    wq->to_use = buf;

    wq->ring.desc_avail--;
}
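
/*
 * A minimal posting sketch, assuming the caller has already DMA-mapped
 * the buffer and formatted the device-specific descriptor returned by
 * svnic_wq_next_desc() (its layout is defined outside this header).
 * "example_wq_post_one" is illustrative, not part of this driver.
 */
static inline int example_wq_post_one(struct vnic_wq *wq, void *os_buf,
    dma_addr_t dma_addr, unsigned int len)
{
    if (!svnic_wq_desc_avail(wq))
        return -ENOMEM; /* ring full; retry after a completion */

    /* The descriptor at svnic_wq_next_desc(wq) would be formatted
     * here, before the post makes it visible to hardware.
     */

    /* sop = eop = 1: a complete single-fragment packet, so
     * svnic_wq_post() also bumps posted_index after its wmb().
     */
    svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);

    return 0;
}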

static inline void svnic_wq_service(struct vnic_wq *wq,
    struct cq_desc *cq_desc, u16 completed_index,
    void (*buf_service)(struct vnic_wq *wq,
    struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
    void *opaque)
{
    struct vnic_wq_buf *buf;

    buf = wq->to_clean;
    while (1) {

        (*buf_service)(wq, cq_desc, buf, opaque);

        wq->ring.desc_avail++;

        wq->to_clean = buf->next;

        if (buf->index == completed_index)
            break;

        buf = wq->to_clean;
    }
}
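
/*
 * Sketch of a buf_service callback for svnic_wq_service(), assuming the
 * fragment was mapped with dma_map_single() at post time and that the
 * caller passes its struct device through the opaque pointer; the names
 * are illustrative, not part of this driver.
 */
static inline void example_wq_buf_done(struct vnic_wq *wq,
    struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
    struct device *dev = opaque;

    dma_unmap_single(dev, buf->dma_addr, buf->len, DMA_TO_DEVICE);

    /* buf->os_buf is non-NULL only on eop buffers (see
     * svnic_wq_post()); that is where the OS-level request would be
     * completed.
     */
}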

void svnic_wq_free(struct vnic_wq *wq);
int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
    unsigned int index, unsigned int desc_count, unsigned int desc_size);
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
        unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
        unsigned int fetch_index, unsigned int post_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset);

void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset);
unsigned int svnic_wq_error_status(struct vnic_wq *wq);
void svnic_wq_enable(struct vnic_wq *wq);
int svnic_wq_disable(struct vnic_wq *wq);
void svnic_wq_clean(struct vnic_wq *wq,
    void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
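
/*
 * Typical call order, pieced together from the prototypes above (a
 * sketch, not a verbatim driver excerpt):
 *
 *     err = svnic_wq_alloc(vdev, &wq, 0, desc_count, desc_size);
 *     svnic_wq_init(&wq, cq_index, err_intr_enable, err_intr_offset);
 *     svnic_wq_enable(&wq);
 *     ...svnic_wq_post() / svnic_wq_service() while active...
 *     err = svnic_wq_disable(&wq);    may fail if HW does not quiesce
 *     svnic_wq_clean(&wq, buf_clean_fn);
 *     svnic_wq_free(&wq);
 */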
#endif /* _VNIC_WQ_H_ */