Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
0004  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
0005  */
0006 #ifndef _VNIC_WQ_COPY_H_
0007 #define _VNIC_WQ_COPY_H_
0008 
0009 #include <linux/pci.h>
0010 #include "vnic_wq.h"
0011 #include "fcpio.h"
0012 
0013 #define VNIC_WQ_COPY_MAX 1
0014 
/* Copy (FCP exchange) work queue: a single producer/consumer descriptor
 * ring plus its memory-mapped control registers.
 */
struct vnic_wq_copy {
    unsigned int index;                 /* queue index — presumably the queue number within the vdev; confirm in vnic_wq_copy.c */
    struct vnic_dev *vdev;              /* owning device */
    struct vnic_wq_ctrl __iomem *ctrl;  /* memory-mapped */
    struct vnic_dev_ring ring;          /* descriptor ring backing storage */
    unsigned to_use_index;              /* producer: next slot to fill/post */
    unsigned to_clean_index;            /* consumer: next slot to complete */
};
0023 
0024 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
0025 {
0026     return wq->ring.desc_avail;
0027 }
0028 
0029 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
0030 {
0031     return wq->ring.desc_count - 1 - wq->ring.desc_avail;
0032 }
0033 
0034 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
0035 {
0036     struct fcpio_host_req *desc = wq->ring.descs;
0037     return &desc[wq->to_use_index];
0038 }
0039 
0040 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
0041 {
0042 
0043     ((wq->to_use_index + 1) == wq->ring.desc_count) ?
0044         (wq->to_use_index = 0) : (wq->to_use_index++);
0045     wq->ring.desc_avail--;
0046 
0047     /* Adding write memory barrier prevents compiler and/or CPU
0048      * reordering, thus avoiding descriptor posting before
0049      * descriptor is initialized. Otherwise, hardware can read
0050      * stale descriptor fields.
0051      */
0052     wmb();
0053 
0054     iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
0055 }
0056 
0057 static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
0058 {
0059     unsigned int cnt;
0060 
0061     if (wq->to_clean_index <= index)
0062         cnt = (index - wq->to_clean_index) + 1;
0063     else
0064         cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
0065 
0066     wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
0067     wq->ring.desc_avail += cnt;
0068 
0069 }
0070 
0071 static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
0072     u16 completed_index,
0073     void (*q_service)(struct vnic_wq_copy *wq,
0074     struct fcpio_host_req *wq_desc))
0075 {
0076     struct fcpio_host_req *wq_desc = wq->ring.descs;
0077     unsigned int curr_index;
0078 
0079     while (1) {
0080 
0081         if (q_service)
0082             (*q_service)(wq, &wq_desc[wq->to_clean_index]);
0083 
0084         wq->ring.desc_avail++;
0085 
0086         curr_index = wq->to_clean_index;
0087 
0088         /* increment the to-clean index so that we start
0089          * with an unprocessed index next time we enter the loop
0090          */
0091         ((wq->to_clean_index + 1) == wq->ring.desc_count) ?
0092             (wq->to_clean_index = 0) : (wq->to_clean_index++);
0093 
0094         if (curr_index == completed_index)
0095             break;
0096 
0097         /* we have cleaned all the entries */
0098         if ((completed_index == (u16)-1) &&
0099             (wq->to_clean_index == wq->to_use_index))
0100             break;
0101     }
0102 }
0103 
/* Out-of-line queue lifecycle API — implemented in vnic_wq_copy.c
 * (presumably; confirm against the driver sources).
 */
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
    unsigned int index, unsigned int desc_count, unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset);
/* Drain the queue, invoking q_clean on each outstanding descriptor. */
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
    void (*q_clean)(struct vnic_wq_copy *wq,
    struct fcpio_host_req *wq_desc));
0115 
0116 #endif /* _VNIC_WQ_COPY_H_ */