Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* Marvell RVU Ethernet driver
0003  *
0004  * Copyright (C) 2020 Marvell.
0005  *
0006  */
0007 
0008 #ifndef OTX2_TXRX_H
0009 #define OTX2_TXRX_H
0010 
0011 #include <linux/etherdevice.h>
0012 #include <linux/iommu.h>
0013 #include <linux/if_vlan.h>
0014 #include <net/xdp.h>
0015 
/* NIX channel number bases for loopback (LBK), SDP and CGX links */
#define LBK_CHAN_BASE   0x000
#define SDP_CHAN_BASE   0x700
#define CGX_CHAN_BASE   0x800

/* Round X up to OTX2_ALIGN (OTX2_ALIGN is defined elsewhere in the driver) */
#define OTX2_DATA_ALIGN(X)  ALIGN(X, OTX2_ALIGN)
/* Headroom reserved at the start of each Rx buffer (see RCV_FRAG_LEN1) */
#define OTX2_HEAD_ROOM      OTX2_ALIGN

/* Ethernet header length including room for two stacked VLAN tags */
#define OTX2_ETH_HLEN       (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU        60

#define OTX2_MAX_GSO_SEGS   255
/* Max scatter/gather fragments carried by one send queue entry (SQE) */
#define OTX2_MAX_FRAGS_IN_SQE   9

/* Largest MTU usable with XDP attached: 1530-byte frame minus headers */
#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN)
0030 
/* Rx buffer size should be in multiples of 128bytes */
/* Raw Rx fragment length for a payload of @x bytes:
 * headroom + aligned payload + aligned skb_shared_info.
 */
#define RCV_FRAG_LEN1(x)                \
        ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
        OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Prefer 2048 byte buffers for better last level cache
 * utilization or data distribution across regions.
 */
/* NOTE: expands RCV_FRAG_LEN1(x) twice, so @x is evaluated more than
 * once — do not pass an expression with side effects.
 */
#define RCV_FRAG_LEN(x) \
        ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))

/* Usable DMA length of a buffer of total size @x, headroom excluded */
#define DMA_BUFFER_LEN(x)   ((x) - OTX2_HEAD_ROOM)
0043 
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is equal to this value.
 */
#define CQ_CQE_THRESH_DEFAULT   10

/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is nonzero and this much time elapses after that.
 */
#define CQ_TIMER_THRESH_DEFAULT 1  /* 1 usec */
#define CQ_TIMER_THRESH_MAX     25 /* 25 usec */

/* Min number of CQs (of the ones mapped to this CINT)
 * with valid CQEs.
 */
#define CQ_QCOUNT_DEFAULT   1

/* Error flag bit positions in the CQ operation status word.
 * NOTE(review): values follow the hardware register layout — confirm
 * against the silicon reference manual.
 */
#define CQ_OP_STAT_OP_ERR       63
#define CQ_OP_STAT_CQ_ERR       46
0062 
/* Generic per-queue traffic counters, embedded in both Rx and Tx queues */
struct queue_stats {
    u64 bytes;  /* total bytes handled by the queue */
    u64 pkts;   /* total packets handled by the queue */
};
0067 
/* Receive queue (RQ) state; holds only software statistics for now */
struct otx2_rcv_queue {
    struct queue_stats  stats;
};
0071 
/* Scatter list bookkeeping for one in-flight Tx skb: per-fragment sizes
 * and DMA addresses (presumably consulted at Tx completion to unmap and
 * free — confirm against the .c side).
 */
struct sg_list {
    u16 num_segs;   /* number of valid entries in size[]/dma_addr[] */
    u64 skb;        /* skb pointer stored as a u64 */
    u64 size[OTX2_MAX_FRAGS_IN_SQE];
    u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
};
0078 
/* Send queue (SQ) state: SQE ring memory, buffer bookkeeping and Tx stats.
 * Cacheline-aligned as it is accessed in the Tx hot path.
 */
struct otx2_snd_queue {
    u8          aura_id;        /* buffer pool (aura) id backing this SQ */
    u16         head;           /* NOTE(review): looks like the SQE producer index — confirm */
    u16         sqe_size;       /* size of one SQE in bytes */
    u32         sqe_cnt;        /* number of SQEs in the ring */
    u16         num_sqbs;       /* number of SQE buffers (SQBs) */
    u16         sqe_thresh;
    u8          sqe_per_sqb;    /* SQEs that fit in one SQB */
    u64          io_addr;       /* doorbell I/O address — TODO confirm */
    u64         *aura_fc_addr;  /* aura flow-control status address */
    u64         *lmt_addr;      /* LMT store region used by cn10k_sqe_flush() — confirm */
    void            *sqe_base;  /* CPU address of the SQE ring */
    struct qmem     *sqe;       /* SQE ring memory */
    struct qmem     *tso_hdrs;  /* staging buffers for TSO headers */
    struct sg_list      *sg;    /* per-SQE scatter bookkeeping, see struct sg_list */
    struct qmem     *timestamps;
    struct queue_stats  stats;  /* Tx byte/packet counters */
    u16         sqb_count;
    u64         *sqb_ptrs;
} ____cacheline_aligned_in_smp;
0099 
/* Completion queue roles handled by one completion interrupt (CINT).
 * CQS_PER_CINT is a count, not a queue type.
 */
enum cq_type {
    CQ_RX,
    CQ_TX,
    CQ_XDP,
    CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
0106 
/* Per-CINT NAPI context covering up to CQS_PER_CINT completion queues */
struct otx2_cq_poll {
    void            *dev;   /* back-pointer to the owning device context — TODO confirm type */
/* Marks an unused/invalid slot in cq_ids[] (fits in u8) */
#define CINT_INVALID_CQ     255
    u8          cint_idx;   /* completion interrupt index */
    u8          cq_ids[CQS_PER_CINT];
    struct dim      dim;    /* dynamic interrupt moderation state */
    struct napi_struct  napi;
};
0115 
/* Receive buffer pool: stack memory, flow-control address and buffer size */
struct otx2_pool {
    struct qmem     *stack;
    struct qmem     *fc_addr;
    u16         rbsize; /* receive buffer size */
};
0121 
/* Completion queue (CQ) state shared by Rx, Tx and XDP completions.
 * Cacheline-aligned as it is accessed in the NAPI hot path.
 */
struct otx2_cq_queue {
    u8          cq_idx;     /* index of this CQ within the queue set */
    u8          cq_type;    /* one of enum cq_type */
    u8          cint_idx; /* CQ interrupt id */
    u8          refill_task_sched; /* nonzero while a refill task is pending — TODO confirm */
    u16         cqe_size;   /* size of one CQE in bytes */
    u16         pool_ptrs;  /* buffer pointers to refill, see *_refill_pool_ptrs() */
    u32         cqe_cnt;    /* number of CQEs in the ring */
    u32         cq_head;
    u32         cq_tail;
    u32         pend_cqe;   /* CQEs pending processing — confirm against .c side */
    void            *cqe_base;  /* CPU address of the CQE ring */
    struct qmem     *cqe;       /* CQE ring memory */
    struct otx2_pool    *rbpool;    /* receive buffer pool backing this CQ */
    struct xdp_rxq_info xdp_rxq;    /* XDP Rx queue registration info */
} ____cacheline_aligned_in_smp;
0138 
/* Queue set: all Rx/Tx queues, completion queues, pools and NAPI
 * contexts belonging to one network interface.
 */
struct otx2_qset {
    u32         rqe_cnt;
    u32         sqe_cnt; /* Keep these two at top */
#define OTX2_MAX_CQ_CNT     64
    u16         cq_cnt;     /* number of CQs, bounded by OTX2_MAX_CQ_CNT */
    u16         xqe_size;   /* CQE size selected for all CQs */
    struct otx2_pool    *pool;  /* array of receive buffer pools */
    struct otx2_cq_poll *napi;  /* per-CINT NAPI contexts */
    struct otx2_cq_queue    *cq;    /* array of completion queues */
    struct otx2_snd_queue   *sq;    /* array of send queues */
    struct otx2_rcv_queue   *rq;    /* array of receive queues */
};
0151 
0152 /* Translate IOVA to physical address */
0153 static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
0154 {
0155     /* Translation is installed only when IOMMU is present */
0156     if (likely(iommu_domain))
0157         return iommu_iova_to_phys(iommu_domain, dma_addr);
0158     return dma_addr;
0159 }
0160 
/* NAPI poll handler shared by all CQ types mapped to a CINT */
int otx2_napi_handler(struct napi_struct *napi, int budget);
/* Queue @skb on @sq; returns false on failure (e.g. no room) — TODO confirm */
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
            struct sk_buff *skb, u16 qidx);
/* SQE flush/doorbell helpers: CN10K silicon vs. earlier OcteonTx2 */
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
             int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
            int size, int qidx);
/* Refill receive buffer pool pointers for @cq (per-silicon variants) */
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
0170 #endif /* OTX2_TXRX_H */