#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/xdp.h>
#include "q_struct.h"

#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8

/* Per-VF interrupt ID ranges: CQ 0-7, SQ 8-15, RBDR 16-17, misc 18, QS error 19 */
#define NICVF_INTR_ID_CQ 0
#define NICVF_INTR_ID_SQ 8
#define NICVF_INTR_ID_RBDR 16
#define NICVF_INTR_ID_MISC 18
#define NICVF_INTR_ID_QS_ERR 19

#define for_each_cq_irq(irq) \
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq) \
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq) \
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

#define RBDR_SIZE0 0ULL
#define RBDR_SIZE1 1ULL
#define RBDR_SIZE2 2ULL
#define RBDR_SIZE3 3ULL
#define RBDR_SIZE4 4ULL
#define RBDR_SIZE5 5ULL
#define RBDR_SIZE6 6ULL

#define SND_QUEUE_SIZE0 0ULL
#define SND_QUEUE_SIZE1 1ULL
#define SND_QUEUE_SIZE2 2ULL
#define SND_QUEUE_SIZE3 3ULL
#define SND_QUEUE_SIZE4 4ULL
#define SND_QUEUE_SIZE5 5ULL
#define SND_QUEUE_SIZE6 6ULL

#define CMP_QUEUE_SIZE0 0ULL
#define CMP_QUEUE_SIZE1 1ULL
#define CMP_QUEUE_SIZE2 2ULL
#define CMP_QUEUE_SIZE3 3ULL
#define CMP_QUEUE_SIZE4 4ULL
#define CMP_QUEUE_SIZE5 5ULL
#define CMP_QUEUE_SIZE6 6ULL
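
/* Illustrative note derived from the length macros below (a reading aid, not
 * text from the original header): the SIZEx values are hardware size
 * encodings, not entry counts. An encoding of N maps to (1 << (N + 10))
 * entries for SQs/CQs and (1 << (N + 13)) entries for RBDRs, e.g.:
 *
 *	SND_QUEUE_SIZE0 -> 1ULL << (0 + 10) = 1K descriptors
 *	SND_QUEUE_SIZE6 -> 1ULL << (6 + 10) = 64K descriptors
 *	RBDR_SIZE0      -> 1ULL << (0 + 13) = 8K buffer descriptors
 */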

/* Default queue count per QS, its lengths and threshold values */
#define DEFAULT_RBDR_CNT 1

#define SND_QSIZE SND_QUEUE_SIZE0
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2

/* Only one CQE per transmitted packet since HW timestamping is not enabled */
#define MAX_CQE_PER_PKT_XMIT 1

/* Keep CQ and SQ sizes the same; if timestamping is enabled this
 * relationship changes since each packet then consumes more CQEs.
 */
#define CMP_QSIZE CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10))
#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */

/* Number of CQEs the HW may consume anyway due to pipelining effects,
 * irrespective of the configured PASS/DROP levels.
 */
#define CMP_QUEUE_PIPELINE_RSVD 544

#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 1536 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
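
/* Illustrative note (an assumption about how such buffers are typically
 * consumed, not a quote from the original header): RCV_FRAG_LEN reserves
 * NET_SKB_PAD of headroom plus tail room for struct skb_shared_info, which is
 * the layout build_skb() expects, roughly:
 *
 *	void *buf = ...;			// RCV_FRAG_LEN bytes, hypothetical
 *	struct sk_buff *skb = build_skb(buf, RCV_FRAG_LEN);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// skip the reserved headroom
 *		skb_put(skb, pkt_len);		// pkt_len is hypothetical here
 *	}
 */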

#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
			 MAX_CQE_PER_PKT_XMIT)
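
/* Illustrative arithmetic (not from the original header): with the default
 * SND_QSIZE of SND_QUEUE_SIZE0, SND_QUEUE_LEN is 1024, so
 * MAX_CQES_FOR_TX = (1024 / 2) * 1 = 512, i.e. the worst-case number of TX
 * completions the CQ must absorb when every packet uses the minimum of two
 * SQ descriptors.
 */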

/* RED and backpressure levels of the CQ for packet reception.
 * For the CQ, the level is a measure of emptiness, i.e. 0x0 means full:
 * packets are accepted while enough CQEs remain free (above the PASS level),
 * marked for RED in between, and dropped below the DROP level.
 */
#define RQ_PASS_CQ_LVL 224ULL
#define RQ_DROP_CQ_LVL 216ULL

/* RED and backpressure levels of the RBDR for packet reception.
 * For the RBDR, the level is a measure of fullness, i.e. 0x0 means empty:
 * packets are accepted while enough receive buffers are still available
 * (above the PASS level) and dropped once the ring drains below the DROP
 * level.
 */
#define RQ_PASS_RBDR_LVL 8ULL
#define RQ_DROP_RBDR_LVL 0ULL

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
#define CMP_QUEUE_DESC_SIZE 512

/* Buffer and descriptor base-address alignments */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)

/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET BIT_ULL(41)
#define NICVF_SQ_RESET BIT_ULL(17)
#define NICVF_RBDR_RESET BIT_ULL(43)

/* Error levels reported in the CQE_RX descriptor */
enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

/* Error opcodes reported in the CQE_RX descriptor */
enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

/* Error opcodes reported in the CQE_SEND (TX completion) descriptor */
enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64 bytes;
	u64 pkts;
} ____cacheline_aligned_in_smp;

/* Queue descriptor ring memory, kept in both its raw (unaligned) and
 * HW-aligned forms so the original allocation can be freed later.
 */
struct q_desc_mem {
	dma_addr_t dma;
	u64 size;
	u32 q_len;
	dma_addr_t phys_base;
	void *base;
	void *unalign_base;
};

/* Cached page entry used for receive buffer page recycling */
struct pgcache {
	struct page *page;
	int ref_count;
	u64 dma_addr;
};

/* Receive buffer descriptor ring */
struct rbdr {
	bool enable;
	u32 dma_size;
	u32 frag_len;
	u32 thresh;		/* Threshold level for interrupt */
	void *desc;
	u32 head;
	u32 tail;
	struct q_desc_mem dmem;
	bool is_xdp;

	/* For page recycling */
	int pgidx;
	int pgcnt;
	int pgalloc;
	struct pgcache *pgcache;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool enable;
	struct rbdr *rbdr_start;
	struct rbdr *rbdr_cont;
	bool en_tcp_reassembly;
	u8 cq_qs;		/* CQ's QS to which this RQ is assigned */
	u8 cq_idx;		/* CQ index (0 to 7) in the QS */
	u8 cont_rbdr_qs;	/* Continue buffer ptrs - QS num */
	u8 cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	u8 start_rbdr_qs;	/* First buffer ptrs - QS num */
	u8 start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	u8 caching;
	struct rx_tx_queue_stats stats;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool enable;
	u16 thresh;
	spinlock_t lock;	/* Serializes processing of CQEs */
	void *desc;
	struct q_desc_mem dmem;
	int irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool enable;
	u8 cq_qs;		/* CQ's QS to which this SQ is assigned */
	u8 cq_idx;		/* CQ index (0 to 7) in the above QS */
	u16 thresh;
	atomic_t free_cnt;
	u32 head;
	u32 tail;
	u64 *skbuff;
	void *desc;
	u64 *xdp_page;
	u16 xdp_desc_cnt;
	u16 xdp_free_cnt;
	bool is_xdp;

	/* For TSO segment's header */
	char *tso_hdrs;
	dma_addr_t tso_hdrs_phys;

	cpumask_t affinity_mask;
	struct q_desc_mem dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool enable;
	bool be_en;
	u8 vnic_id;
	u8 rq_cnt;
	u8 cq_cnt;
	u64 cq_len;
	u8 sq_cnt;
	u64 sq_len;
	u8 rbdr_cnt;
	u64 rbdr_len;
	struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;
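
/* Illustrative note (a hypothetical loop, not from the driver sources): a
 * queue set bundles every queue a VF owns and is simply indexed per queue
 * type, e.g. to walk the configured send queues:
 *
 *	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 *		handle_sq(&qs->sq[qidx]);	// handle_sq is hypothetical
 */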

#define GET_RBDR_DESC(RING, idx) \
	(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx) \
	(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx) \
	(&(((union cq_desc_t *)((RING)->desc))[idx]))
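
/* Illustrative sketch (not taken verbatim from the driver): these accessors
 * index into a ring's contiguous descriptor memory; callers wrap the index
 * with the ring length, e.g.:
 *
 *	// 'sq' and 'qentry' are hypothetical locals for illustration
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, qentry);
 *	qentry = (qentry + 1) & (sq->dmem.q_len - 1);	// q_len is a power of two
 */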

/* CQ status bits */
#define CQ_WR_FULL BIT(26)
#define CQ_WR_DISABLE BIT(25)
#define CQ_WR_FAULT BIT(24)
#define CQ_CQE_COUNT (0xFFFF << 0)

#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is needed only when an IOMMU domain is in use */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}
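
/* Illustrative sketch (an assumption about typical use, not a quote from the
 * driver): when an IOMMU is present, a DMA address stored in a descriptor is
 * an IOVA and must be translated back before the CPU can touch the buffer:
 *
 *	// 'dma_addr' is a hypothetical descriptor field for illustration
 *	void *va = phys_to_virt(nicvf_iova_to_phys(nic, dma_addr));
 */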

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num);
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
			    u64 bufaddr, u64 dma_addr, u16 len);
void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(struct tasklet_struct *t);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */