/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif
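
/* Illustrative use (not part of the original header): CVM_CAST64() yields a
 * value printable with a "%lld" format on both 32-bit and 64-bit kernels,
 * e.g. for a hypothetical 64-bit stats counter:
 *
 *	dev_info(&oct->pci_dev->dev, "tx bytes: %lld\n",
 *		 CVM_CAST64(stats->tx_total_bytes));
 */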

#define DRV_NAME "LiquidIO"

/** Driver's private data, attached to each Octeon device. */
struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	/** Mask of DROQs on which NAPI is currently active. */
	unsigned long napi_mask;
	/** Pointer to the Octeon device. */
	struct octeon_device *dev;
};

/** Structure used by the NIC driver to store the information needed to
 * free an sk_buff once the packet has been fetched by Octeon.
 */
struct octnet_buf_free_info {
	/** Pointer to the network device's private structure. */
	struct lio *lio;

	/** Pointer to the sk_buff being transmitted. */
	struct sk_buff *skb;

	/** Pointer to the gather list, if any. */
	struct octnic_gather *g;

	/** Physical address of skb->data or of the gather list. */
	u64 dptr;

	/** Piggybacked soft command, if any. */
	struct octeon_soft_command *sc;
};

/* BQL-related functions */
int octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);

void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq);

/**
 * \brief Swaps 8B blocks of data to big-endian byte order, in place.
 * @param data   - pointer to the data to be swapped.
 * @param blocks - number of 8B (64-bit) blocks to swap.
 */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
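
/* Illustrative use (not part of the original header): data returned by
 * Octeon as 64-bit words can be byte-swapped in place, e.g. for a
 * hypothetical four-word response buffer:
 *
 *	u64 resp[4];
 *	...
 *	octeon_swap_8B_data(resp, 4);	// swap four 8B blocks in place
 */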

/**
 * \brief Unmaps a PCI BAR that was mapped by octeon_map_pci_barx().
 * @param oct    - pointer to the Octeon device.
 * @param baridx - index of the BAR to unmap.
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief Maps a PCI BAR into the kernel's address space.
 * @param oct         - pointer to the Octeon device.
 * @param baridx      - index of the BAR to map.
 * @param max_map_len - maximum length to map; 0 maps the entire BAR.
 *
 * Returns 0 on success, 1 on failure.
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
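
/* Illustrative pairing (not part of the original header): a probe path
 * would typically map a BAR and unmap it again on teardown or error, e.g.:
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))	// map all of BAR0
 *		return -ENODEV;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);		// on remove or error path
 */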

/* Waits for the response to a previously posted soft command.
 *
 * @param oct_dev - pointer to the Octeon device.
 * @param sc      - pointer to the soft command that was posted.
 * @param timeout - time to wait for the response, in milliseconds;
 *                  0 means wait without a caller-imposed limit
 *                  (MAX_SCHEDULE_TIMEOUT).
 *
 * Return values:
 *  0      - the response for the request was received.
 * -EINTR  - the wait was interrupted by a signal.
 * -ETIME  - the caller-specified timeout expired.
 * -EBUSY  - the request timed out inside the driver; the soft command
 *           will be freed on the caller's behalf by the driver.
 */
static inline int
wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
			       struct octeon_soft_command *sc,
			       unsigned long timeout)
{
	int errno = 0;
	long timeout_jiff;

	if (timeout)
		timeout_jiff = msecs_to_jiffies(timeout);
	else
		timeout_jiff = MAX_SCHEDULE_TIMEOUT;

	timeout_jiff =
		wait_for_completion_interruptible_timeout(&sc->complete,
							  timeout_jiff);
	if (timeout_jiff == 0) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc timed out\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -ETIME;
	} else if (timeout_jiff == -ERESTARTSYS) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc was interrupted\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EINTR;
	} else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc hit a fatal timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EBUSY;
	}

	return errno;
}
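
/* Illustrative use (not part of the original header): after posting a soft
 * command, a caller waits for its completion; on error the driver owns and
 * frees the command, on success the caller consumes the response and then
 * hands the command back:
 *
 *	err = wait_for_sc_completion_timeout(oct, sc, 0);
 *	if (err)
 *		return err;	// sc will be freed by the driver
 *	...			// read the response from sc here
 *	WRITE_ONCE(sc->caller_is_done, true);
 */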

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif
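
/* Illustrative values (not part of the original header): each macro rounds
 * up to the next multiple of a power of two, e.g. ROUNDUP4(5) == 8,
 * ROUNDUP8(13) == 16 and ROUNDUP8(16) == 16. Note the masks are 32-bit
 * constants, so results are truncated for values wider than 32 bits.
 */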

#endif /* _OCTEON_MAIN_H_ */