#ifndef __NITROX_DEV_H
#define __NITROX_DEV_H

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/if.h>

#define VERSION_LEN 32
/* Maximum queues in PF mode */
#define MAX_PF_QUEUES 64
/* Maximum device queues */
#define MAX_DEV_QUEUES (MAX_PF_QUEUES)
/* Maximum UCD blocks */
#define CNN55XX_MAX_UCD_BLOCKS 8
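/**
 * struct nitrox_cmdq - NITROX command queue
 * @cmd_qlock: command queue lock
 * @resp_qlock: response queue lock
 * @backlog_qlock: backlog queue lock
 * @ndev: NITROX device
 * @response_head: submitted request list
 * @backlog_head: backlog queue
 * @dbell_csr_addr: doorbell register address for this queue
 * @compl_cnt_csr_addr: completion count register address of the queue
 * @base: command queue base address
 * @dma: dma address of the base
 * @backlog_qflush: backlog queue flush work
 * @pending_count: number of requests pending
 * @backlog_count: number of requests in backlog
 * @write_idx: next write index for the command
 * @instr_size: command size
 * @qno: command queue number
 * @qsize: command queue size
 * @unalign_base: unaligned base address
 * @unalign_dma: unaligned dma address
 */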
struct nitrox_cmdq {
	spinlock_t cmd_qlock;
	spinlock_t resp_qlock;
	spinlock_t backlog_qlock;

	struct nitrox_device *ndev;
	struct list_head response_head;
	struct list_head backlog_head;

	u8 __iomem *dbell_csr_addr;
	u8 __iomem *compl_cnt_csr_addr;
	u8 *base;
	dma_addr_t dma;

	struct work_struct backlog_qflush;

	atomic_t pending_count;
	atomic_t backlog_count;

	int write_idx;
	u8 instr_size;
	u8 qno;
	u32 qsize;

	u8 *unalign_base;
	dma_addr_t unalign_dma;
};
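/**
 * struct nitrox_hw - NITROX hardware information
 * @partname: hardware part name
 * @fw_name: firmware version(s), one per UCD block
 * @freq: core frequency
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @revision_id: PCI revision ID
 * @se_cores: number of symmetric (SE) cores
 * @ae_cores: number of asymmetric (AE) cores
 * @zip_cores: number of zip cores
 */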
struct nitrox_hw {
	char partname[IFNAMSIZ * 2];
	char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN];

	int freq;
	u16 vendor_id;
	u16 device_id;
	u8 revision_id;

	u8 se_cores;
	u8 ae_cores;
	u8 zip_cores;
};
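/* per-device request statistics */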
struct nitrox_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};

#define IRQ_NAMESZ 32
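/**
 * struct nitrox_q_vector - queue interrupt vector context
 * @name: irq name
 * @valid: set when the vector is initialized
 * @ring: ring number serviced by this vector
 * @resp_tasklet: response handler tasklet
 * @cmdq: command queue tied to this vector
 * @ndev: NITROX device (for vectors not tied to a ring)
 */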
struct nitrox_q_vector {
	char name[IRQ_NAMESZ];
	bool valid;
	int ring;
	struct tasklet_struct resp_tasklet;
	union {
		struct nitrox_cmdq *cmdq;
		struct nitrox_device *ndev;
	};
};

enum mcode_type {
	MCODE_TYPE_INVALID,
	MCODE_TYPE_AE,
	MCODE_TYPE_SE_SSL,
	MCODE_TYPE_SE_IPSEC,
};
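/**
 * union mbox_msg - PF/VF mailbox message
 * @value: raw 64-bit message
 * @type: message type
 * @opcode: message opcode
 * @data: message payload
 * @id: chip/VF identification layout
 * @mcode_info: microcode information layout
 */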
union mbox_msg {
	u64 value;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 data: 58;
	};
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 chipid: 8;
		u64 vfid: 8;
	} id;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 count: 4;
		u64 info: 40;
		u64 next_se_grp: 3;
		u64 next_ae_grp: 3;
	} mcode_info;
};
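/**
 * struct nitrox_vfdev - NITROX VF device instance in PF
 * @state: VF device state
 * @vfno: VF number
 * @nr_queues: number of queues enabled in the VF
 * @ring: ring used to communicate with the VF
 * @msg: mailbox message data from the VF
 * @mbx_resp: mailbox response counter
 */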
struct nitrox_vfdev {
	atomic_t state;
	int vfno;
	int nr_queues;
	int ring;
	union mbox_msg msg;
	atomic64_t mbx_resp;
};
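/**
 * struct nitrox_iov - SR-IOV information
 * @num_vfs: number of VF(s) enabled
 * @max_vf_queues: maximum number of queues allowed per VF
 * @vfdev: VF device instances
 * @pf2vf_wq: workqueue for PF to VF communication
 * @msix: MSI-X entry used by the PF in SR-IOV mode
 */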
struct nitrox_iov {
	int num_vfs;
	int max_vf_queues;
	struct nitrox_vfdev *vfdev;
	struct workqueue_struct *pf2vf_wq;
	struct msix_entry msix;
};
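/* NITROX device states */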
enum ndev_state {
	__NDEV_NOT_READY,
	__NDEV_READY,
	__NDEV_IN_RESET,
};
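/* NITROX support modes for VF(s) */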
enum vf_mode {
	__NDEV_MODE_PF,
	__NDEV_MODE_VF16,
	__NDEV_MODE_VF32,
	__NDEV_MODE_VF64,
	__NDEV_MODE_VF128,
};

#define __NDEV_SRIOV_BIT 0

/* command queue size */
#define DEFAULT_CMD_QLEN 2048
/* command timeout in milliseconds */
#define CMD_TIMEOUT 2000

#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))

#define NITROX_CSR_ADDR(ndev, offset) \
	((ndev)->bar_addr + (offset))
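/**
 * struct nitrox_device - NITROX device information
 * @list: entry in the global device list
 * @bar_addr: iomapped BAR address
 * @pdev: PCI device information
 * @state: NITROX device state
 * @flags: device flags
 * @timeout: request timeout
 * @refcnt: device usage count
 * @idx: device index
 * @node: NUMA node the device is attached to
 * @qlen: command queue length
 * @nr_queues: number of command queues
 * @mode: device mode (PF/VF)
 * @ctx_pool: DMA pool for crypto contexts
 * @pkt_inq: packet input rings
 * @aqmq: AQM command queues
 * @qvec: MSI-X queue vectors information
 * @iov: SR-IOV information
 * @num_vecs: number of MSI-X vectors
 * @stats: request statistics
 * @hw: hardware information
 * @debugfs_dir: debugfs directory
 */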
struct nitrox_device {
	struct list_head list;

	u8 __iomem *bar_addr;
	struct pci_dev *pdev;

	atomic_t state;
	unsigned long flags;
	unsigned long timeout;
	refcount_t refcnt;

	u8 idx;
	int node;
	u16 qlen;
	u16 nr_queues;
	enum vf_mode mode;

	struct dma_pool *ctx_pool;
	struct nitrox_cmdq *pkt_inq;
	struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;

	struct nitrox_q_vector *qvec;
	struct nitrox_iov iov;
	int num_vecs;

	struct nitrox_stats stats;
	struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *debugfs_dir;
#endif
};
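/**
 * nitrox_read_csr - Read from device register
 * @ndev: NITROX device
 * @offset: offset of the register to read
 *
 * Returns: value read
 */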
static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
{
	return readq(ndev->bar_addr + offset);
}
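/**
 * nitrox_write_csr - Write to device register
 * @ndev: NITROX device
 * @offset: offset of the register to write
 * @value: value to write
 */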
static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
				    u64 value)
{
	writeq(value, (ndev->bar_addr + offset));
}

static inline bool nitrox_ready(struct nitrox_device *ndev)
{
	return atomic_read(&ndev->state) == __NDEV_READY;
}

static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
{
	return atomic_read(&vfdev->state) == __NDEV_READY;
}

#endif /* __NITROX_DEV_H */