Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* Marvell RVU Ethernet driver
0003  *
0004  * Copyright (C) 2020 Marvell.
0005  *
0006  */
0007 
0008 #ifndef OTX2_COMMON_H
0009 #define OTX2_COMMON_H
0010 
0011 #include <linux/ethtool.h>
0012 #include <linux/pci.h>
0013 #include <linux/iommu.h>
0014 #include <linux/net_tstamp.h>
0015 #include <linux/ptp_clock_kernel.h>
0016 #include <linux/timecounter.h>
0017 #include <linux/soc/marvell/octeontx2/asm.h>
0018 #include <net/pkt_cls.h>
0019 #include <net/devlink.h>
0020 #include <linux/time64.h>
0021 #include <linux/dim.h>
0022 
0023 #include <mbox.h>
0024 #include <npc.h>
0025 #include "otx2_reg.h"
0026 #include "otx2_txrx.h"
0027 #include "otx2_devlink.h"
0028 #include <rvu_trace.h>
0029 
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF              0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF      0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF        0xA0F8

/* PCI subsystem ID identifying 96xx silicon PF/VF devices */
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF      0xB200

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM                     2
#define PCI_MBOX_BAR_NUM                        4

#define NAME_SIZE                               32

/* Queue types an NPA aura can be mapped to.
 * NOTE(review): tag is spelled "arua" — looks like a typo for "aura";
 * kept as-is since external code may reference the tag name.
 */
enum arua_mapped_qtypes {
    AURA_NIX_RQ,
    AURA_NIX_SQ,
};

/* NIX LF interrupts range*/
#define NIX_LF_QINT_VEC_START           0x00
#define NIX_LF_CINT_VEC_START           0x40
#define NIX_LF_GINT_VEC             0x80
#define NIX_LF_ERR_VEC              0x81
#define NIX_LF_POISON_VEC           0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID    2000

/* NOTE: these macros expect a local/parameter named 'pfvf' in scope
 * at the expansion site.
 */
#define OTX2_GET_RX_STATS(reg) \
    otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
    otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
0062 
/* Per-CPU LMTST line info used on CN10K for batched stores */
struct otx2_lmt_info {
    u64 lmt_addr;   /* VA of this CPU's LMT line */
    u16 lmt_id;     /* LMT line identifier (also used as LMTST id) */
};
/* RSS configuration */
struct otx2_rss_ctx {
    u8  ind_tbl[MAX_RSS_INDIR_TBL_SIZE];    /* RSS indirection table */
};

struct otx2_rss_info {
    u8 enable;
    u32 flowkey_cfg;
    u16 rss_size;
#define RSS_HASH_KEY_SIZE   44   /* 352 bit key */
    u8  key[RSS_HASH_KEY_SIZE];
    struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
    NPC_ERRLVL_RE,
    NPC_ERRLVL_LID_LA,
    NPC_ERRLVL_LID_LB,
    NPC_ERRLVL_LID_LC,
    NPC_ERRLVL_LID_LD,
    NPC_ERRLVL_LID_LE,
    NPC_ERRLVL_LID_LF,
    NPC_ERRLVL_LID_LG,
    NPC_ERRLVL_LID_LH,
    NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
    /* NPC_ERRLVL_RE errcodes */
    ERRCODE_FCS = 0x7,
    ERRCODE_FCS_RCV = 0x8,
    ERRCODE_UNDERSIZE = 0x10,
    ERRCODE_OVERSIZE = 0x11,
    ERRCODE_OL2_LEN_MISMATCH = 0x12,
    /* NPC_ERRLVL_NIX errcodes */
    ERRCODE_OL3_LEN = 0x10,
    ERRCODE_OL4_LEN = 0x11,
    ERRCODE_OL4_CSUM = 0x12,
    ERRCODE_IL3_LEN = 0x20,
    ERRCODE_IL4_LEN = 0x21,
    ERRCODE_IL4_CSUM = 0x22,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
    TX_UCAST    = 0x0,
    TX_BCAST    = 0x1,
    TX_MCAST    = 0x2,
    TX_DROP     = 0x3,
    TX_OCTS     = 0x4,
    TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
    RX_OCTS     = 0x0,
    RX_UCAST    = 0x1,
    RX_BCAST    = 0x2,
    RX_MCAST    = 0x3,
    RX_DROP     = 0x4,
    RX_DROP_OCTS    = 0x5,
    RX_FCS      = 0x6,
    RX_ERR      = 0x7,
    RX_DRP_BCAST    = 0x8,
    RX_DRP_MCAST    = 0x9,
    RX_DRP_L3BCAST  = 0xa,
    RX_DRP_L3MCAST  = 0xb,
    RX_STATS_ENUM_LAST,
};

/* Aggregated device statistics read from NIX LF HW counters */
struct otx2_dev_stats {
    u64 rx_bytes;
    u64 rx_frames;
    u64 rx_ucast_frames;
    u64 rx_bcast_frames;
    u64 rx_mcast_frames;
    u64 rx_drops;

    u64 tx_bytes;
    u64 tx_frames;
    u64 tx_ucast_frames;
    u64 tx_bcast_frames;
    u64 tx_mcast_frames;
    u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
    atomic_t rx_fcs_errs;
    atomic_t rx_oversize_errs;
    atomic_t rx_undersize_errs;
    atomic_t rx_csum_errs;
    atomic_t rx_len_errs;
    atomic_t rx_other_errs;
};

/* Mailbox state: down-direction (PF/VF -> AF) and up-direction
 * (AF -> PF/VF) mboxes plus the work items that process them.
 */
struct mbox {
    struct otx2_mbox    mbox;
    struct work_struct  mbox_wrk;
    struct otx2_mbox    mbox_up;
    struct work_struct  mbox_up_wrk;
    struct otx2_nic     *pfvf;
    void            *bbuf_base; /* Bounce buffer for mbox memory */
    struct mutex        lock;   /* serialize mailbox access */
    int         num_msgs; /* mbox number of messages */
    int         up_num_msgs; /* mbox_up number of messages */
};
0175 
/* Hardware resources/capabilities owned by this PF/VF function */
struct otx2_hw {
    struct pci_dev      *pdev;
    struct otx2_rss_info    rss_info;
    u16                     rx_queues;      /* No of receive queues */
    u16                     tx_queues;      /* No of transmit queues */
    u16                     xdp_queues;     /* No of XDP transmit queues */
    u16                     tot_tx_queues;  /* tx_queues + xdp_queues */
    u16         max_queues;
    u16         pool_cnt;
    u16         rqpool_cnt;
    u16         sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN   2048
    u16         rbuf_len;
    u32         xqe_size;

    /* NPA */
    u32         stack_pg_ptrs;  /* No of ptrs per stack page */
    u32         stack_pg_bytes; /* Size of stack page */
    u16         sqb_size;

    /* NIX */
    u8          txschq_link_cfg_lvl;
    u16     txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
    u16         matchall_ipolicer;
    u32         dwrr_mtu;

    /* HW settings, coalescing etc */
    u16         rx_chan_base;
    u16         tx_chan_base;
    u16         cq_qcount_wait;
    u16         cq_ecount_wait;
    u16         rq_skid;
    u8          cq_time_wait;

    /* Segmentation */
    u8          lso_tsov4_idx;
    u8          lso_tsov6_idx;
    u8          lso_udpv4_idx;
    u8          lso_udpv6_idx;

    /* RSS */
    u8          flowkey_alg_idx;

    /* MSI-X */
    u8          cint_cnt; /* CQ interrupt count */
    u16         npa_msixoff; /* Offset of NPA vectors */
    u16         nix_msixoff; /* Offset of NIX vectors */
    char            *irq_name;
    cpumask_var_t           *affinity_mask;

    /* Stats */
    struct otx2_dev_stats   dev_stats;
    struct otx2_drv_stats   drv_stats;
    u64         cgx_rx_stats[CGX_RX_STATS_COUNT];
    u64         cgx_tx_stats[CGX_TX_STATS_COUNT];
    u64         cgx_fec_corr_blks;
    u64         cgx_fec_uncorr_blks;
    u8          cgx_links;  /* No. of CGX links present in HW */
    u8          lbk_links;  /* No. of LBK links present in HW */
    u8          tx_link;    /* Transmit channel link number */
/* Capability bits for cap_flag below */
#define HW_TSO          0
#define CN10K_MBOX      1
#define CN10K_LMTST     2
#define CN10K_RPM       3
    unsigned long       cap_flag;

#define LMT_LINE_SIZE       128
#define LMT_BURST_SIZE      32 /* 32 LMTST lines for burst SQE flush */
    u64         *lmt_base;
    struct otx2_lmt_info    __percpu *lmt_info;
};
0248 
/* VF permission levels used by the PF when configuring VFs */
enum vfperm {
    OTX2_RESET_VF_PERM,
    OTX2_TRUSTED_VF,
};

/* Per-VF configuration maintained by the PF */
struct otx2_vf_config {
    struct otx2_nic *pf;
    struct delayed_work link_event_work;
    bool intf_down; /* interface was either configured or not */
    u8 mac[ETH_ALEN];
    u16 vlan;
    int tx_vtag_idx;
    bool trusted;
};

/* Work item used to handle a function-level reset (FLR) of a VF */
struct flr_work {
    struct work_struct work;
    struct otx2_nic *pf;
};

/* Deferred work to replenish RX pool buffer pointers */
struct refill_work {
    struct delayed_work pool_refill_work;
    struct otx2_nic *pf;
};

/* PTP clock state for HW timestamping */
struct otx2_ptp {
    struct ptp_clock_info ptp_info;
    struct ptp_clock *ptp_clock;
    struct otx2_nic *nic;

    struct cyclecounter cycle_counter;
    struct timecounter time_counter;

    struct delayed_work extts_work;
    u64 last_extts;
    u64 thresh;

    struct ptp_pin_desc extts_config;
    /* Silicon-specific raw-timestamp converters */
    u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
    u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
};

#define OTX2_HW_TIMESTAMP_LEN   8

/* Unicast MAC filter entry backed by an NPC MCAM rule */
struct otx2_mac_table {
    u8 addr[ETH_ALEN];
    u16 mcam_entry;
    bool inuse;
};

/* NPC MCAM flow entry bookkeeping for ntuple/VLAN/DMAC filters */
struct otx2_flow_config {
    u16         *flow_ent;
    u16         *def_ent;
    u16         nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT      16
#define OTX2_MAX_UNICAST_FLOWS      8
#define OTX2_MAX_VLAN_FLOWS     1
#define OTX2_MAX_TC_FLOWS   OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT     (OTX2_DEFAULT_FLOWCOUNT + \
                 OTX2_MAX_UNICAST_FLOWS + \
                 OTX2_MAX_VLAN_FLOWS)
    u16         unicast_offset;
    u16         rx_vlan_offset;
    u16         vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS  2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX   0
#define OTX2_VF_VLAN_TX_INDEX   1
    u16         max_flows;
    u8          dmacflt_max_flows;
    u32         *bmap_to_dmacindex;
    unsigned long       *dmacflt_bmap;
    struct list_head    flow_list;
};

struct otx2_tc_info {
    /* hash table to store TC offloaded flows */
    struct rhashtable       flow_table;
    struct rhashtable_params    flow_ht_params;
    unsigned long           *tc_entries_bitmap;
};

/* Silicon-specific operations (OTX2 vs CN10K implementations) */
struct dev_hw_ops {
    int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
    void    (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
                 int size, int qidx);
    void    (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
    void    (*aura_freeptr)(void *dev, int aura, u64 buf);
};
0337 
/* Main per-netdev driver private structure (shared by PF and VF) */
struct otx2_nic {
    void __iomem        *reg_base;
    struct net_device   *netdev;
    struct dev_hw_ops   *hw_ops;
    void            *iommu_domain;
    u16         tx_max_pktlen;
    u16         rbsize; /* Receive buffer size */

/* Bits for the flags field below */
#define OTX2_FLAG_RX_TSTAMP_ENABLED     BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED     BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN         BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC        BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT        BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT        BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT       BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT       BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN           BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED      BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED      BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT     BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED    BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED   BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT      BIT_ULL(14)
/* NOTE(review): bit 15 is unused here — possibly reserved elsewhere */
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
    u64         flags;
    u64         *cq_op_addr;

    struct bpf_prog     *xdp_prog;
    struct otx2_qset    qset;
    struct otx2_hw      hw;
    struct pci_dev      *pdev;
    struct device       *dev;

    /* Mbox */
    struct mbox     mbox;
    struct mbox     *mbox_pfvf;
    struct workqueue_struct *mbox_wq;
    struct workqueue_struct *mbox_pfvf_wq;

    u8          total_vfs;
    u16         pcifunc; /* RVU PF_FUNC */
    u16         bpid[NIX_MAX_BPID_CHAN];
    struct otx2_vf_config   *vf_configs;
    struct cgx_link_user_info linfo;

    /* NPC MCAM */
    struct otx2_flow_config *flow_cfg;
    struct otx2_mac_table   *mac_table;
    struct otx2_tc_info tc_info;

    u64         reset_count;
    struct work_struct  reset_task;
    struct workqueue_struct *flr_wq;
    struct flr_work     *flr_wrk;
    struct refill_work  *refill_wrk;
    struct workqueue_struct *otx2_wq;
    struct work_struct  rx_mode_work;

    /* Ethtool stuff */
    u32         msg_enable;

    /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
    int         nix_blkaddr;
    /* LMTST Lines info */
    struct qmem     *dync_lmt;
    u16         tot_lmt_lines;
    u16         npa_lmt_lines;
    u32         nix_lmt_size;

    struct otx2_ptp     *ptp;
    struct hwtstamp_config  tstamp;

    unsigned long       rq_bmap;

    /* Devlink */
    struct otx2_devlink *dl;
#ifdef CONFIG_DCB
    /* PFC */
    u8          pfc_en;
    u8          *queue_to_pfc_map;
#endif

    /* napi event count. It is needed for adaptive irq coalescing. */
    u32 napi_events;
};
0423 
0424 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
0425 {
0426     return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
0427 }
0428 
0429 static inline bool is_96xx_A0(struct pci_dev *pdev)
0430 {
0431     return (pdev->revision == 0x00) &&
0432         (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
0433 }
0434 
0435 static inline bool is_96xx_B0(struct pci_dev *pdev)
0436 {
0437     return (pdev->revision == 0x01) &&
0438         (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
0439 }
0440 
/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bit 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX        0x00
#define PCI_REVISION_ID_95XX        0x10
#define PCI_REVISION_ID_95XXN       0x20
#define PCI_REVISION_ID_98XX        0x30
#define PCI_REVISION_ID_95XXMM      0x40
#define PCI_REVISION_ID_95XXO       0xE0
0451 
0452 static inline bool is_dev_otx2(struct pci_dev *pdev)
0453 {
0454     u8 midr = pdev->revision & 0xF0;
0455 
0456     return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
0457         midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
0458         midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
0459 }
0460 
0461 static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
0462 {
0463     struct otx2_hw *hw = &pfvf->hw;
0464 
0465     pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
0466     pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
0467     pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
0468 
0469     __set_bit(HW_TSO, &hw->cap_flag);
0470 
0471     if (is_96xx_A0(pfvf->pdev)) {
0472         __clear_bit(HW_TSO, &hw->cap_flag);
0473 
0474         /* Time based irq coalescing is not supported */
0475         pfvf->hw.cq_qcount_wait = 0x0;
0476 
0477         /* Due to HW issue previous silicons required minimum
0478          * 600 unused CQE to avoid CQ overflow.
0479          */
0480         pfvf->hw.rq_skid = 600;
0481         pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
0482     }
0483     if (is_96xx_B0(pfvf->pdev))
0484         __clear_bit(HW_TSO, &hw->cap_flag);
0485 
0486     if (!is_dev_otx2(pfvf->pdev)) {
0487         __set_bit(CN10K_MBOX, &hw->cap_flag);
0488         __set_bit(CN10K_LMTST, &hw->cap_flag);
0489         __set_bit(CN10K_RPM, &hw->cap_flag);
0490     }
0491 }
0492 
0493 /* Register read/write APIs */
0494 static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
0495 {
0496     u64 blkaddr;
0497 
0498     switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
0499     case BLKTYPE_NIX:
0500         blkaddr = nic->nix_blkaddr;
0501         break;
0502     case BLKTYPE_NPA:
0503         blkaddr = BLKADDR_NPA;
0504         break;
0505     default:
0506         blkaddr = BLKADDR_RVUM;
0507         break;
0508     }
0509 
0510     offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
0511     offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
0512 
0513     return nic->reg_base + offset;
0514 }
0515 
0516 static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
0517 {
0518     void __iomem *addr = otx2_get_regaddr(nic, offset);
0519 
0520     writeq(val, addr);
0521 }
0522 
0523 static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
0524 {
0525     void __iomem *addr = otx2_get_regaddr(nic, offset);
0526 
0527     return readq(addr);
0528 }
0529 
0530 /* Mbox bounce buffer APIs */
0531 static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
0532 {
0533     struct otx2_mbox *otx2_mbox;
0534     struct otx2_mbox_dev *mdev;
0535 
0536     mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
0537     if (!mbox->bbuf_base)
0538         return -ENOMEM;
0539 
0540     /* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
0541      * prepare all mbox messages in bounce buffer instead of directly
0542      * in hw mbox memory.
0543      */
0544     otx2_mbox = &mbox->mbox;
0545     mdev = &otx2_mbox->dev[0];
0546     mdev->mbase = mbox->bbuf_base;
0547 
0548     otx2_mbox = &mbox->mbox_up;
0549     mdev = &otx2_mbox->dev[0];
0550     mdev->mbase = mbox->bbuf_base;
0551     return 0;
0552 }
0553 
0554 static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
0555 {
0556     u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
0557     void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
0558     struct otx2_mbox_dev *mdev = &mbox->dev[devid];
0559     struct mbox_hdr *hdr;
0560     u64 msg_size;
0561 
0562     if (mdev->mbase == hw_mbase)
0563         return;
0564 
0565     hdr = hw_mbase + mbox->rx_start;
0566     msg_size = hdr->msg_size;
0567 
0568     if (msg_size > mbox->rx_size - msgs_offset)
0569         msg_size = mbox->rx_size - msgs_offset;
0570 
0571     /* Copy mbox messages from mbox memory to bounce buffer */
0572     memcpy(mdev->mbase + mbox->rx_start,
0573            hw_mbase + mbox->rx_start, msg_size + msgs_offset);
0574 }
0575 
/* With the absence of API for 128-bit IO memory access for arm64,
 * implement required operations at place.
 */
#if defined(CONFIG_ARM64)
/* 128-bit store: writes {lo, hi} as a pair to 'addr' with STP.
 * NOTE(review): the '!' pre-index writeback with offset #0 leaves the
 * address unchanged but marks it written — presumably to satisfy the
 * constraint set; confirm against original driver intent.
 */
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
    __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
             ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

/* Atomic fetch-and-add using the LSE LDADD instruction; returns the
 * value of *ptr prior to the addition.
 */
static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
    u64 result;

    __asm__ volatile(".cpu   generic+lse\n"
             "ldadd %x[i], %x[r], [%[b]]"
             : [r]"=r"(result), "+m"(*ptr)
             : [i]"r"(incr), [b]"r"(ptr)
             : "memory");
    return result;
}

#else
/* Non-arm64 fallbacks: used only for compile coverage on other arches */
#define otx2_write128(lo, hi, addr)     writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)        ({ *ptr += incr; })
#endif
0602 
/* Free a batch of buffer pointers to an NPA aura on CN10K using an
 * LMTST (large atomic store) flush.  'ptrs' holds 'num_ptrs' 64-bit
 * words; ptrs[0] is overwritten here with the aura id + end-of-transfer
 * marker, so callers place buffer pointers from ptrs[1] onward.
 * Statement order (memcpy after dma_wmb(), flush last) is part of the
 * HW protocol — do not reorder.
 */
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
                    u64 *ptrs, u64 num_ptrs)
{
    struct otx2_lmt_info *lmt_info;
    u64 size = 0, count_eot = 0;
    u64 tar_addr, val = 0;

    lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
    tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
    /* LMTID is same as AURA Id */
    val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
    /* Set if [127:64] of last 128bit word has a valid pointer */
    count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
    /* Set AURA ID to free pointer */
    ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
    /* Target address for LMTST flush tells HW how many 128bit
     * words are valid from NPA_LF_AURA_BATCH_FREE0.
     *
     * tar_addr[6:4] is LMTST size-1 in units of 128b.
     */
    if (num_ptrs > 2) {
        size = (sizeof(u64) * num_ptrs) / 16;
        if (!count_eot)
            size++;
        tar_addr |=  ((size - 1) & 0x7) << 4;
    }
    /* Ensure pointer stores are visible before the LMTST flush */
    dma_wmb();
    memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
    /* Perform LMTST flush */
    cn10k_lmt_flush(val, tar_addr);
}
0634 
0635 static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
0636 {
0637     struct otx2_nic *pfvf = dev;
0638     u64 ptrs[2];
0639 
0640     ptrs[1] = buf;
0641     /* Free only one buffer at time during init and teardown */
0642     __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
0643 }
0644 
0645 /* Alloc pointer from pool/aura */
0646 static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
0647 {
0648     u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
0649                NPA_LF_AURA_OP_ALLOCX(0));
0650     u64 incr = (u64)aura | BIT_ULL(63);
0651 
0652     return otx2_atomic64_add(incr, ptr);
0653 }
0654 
0655 /* Free pointer to a pool/aura */
0656 static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
0657 {
0658     struct otx2_nic *pfvf = dev;
0659     void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);
0660 
0661     otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
0662 }
0663 
0664 static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
0665 {
0666     if (type == AURA_NIX_SQ)
0667         return pfvf->hw.rqpool_cnt + idx;
0668 
0669      /* AURA_NIX_RQ */
0670     return idx;
0671 }
0672 
0673 /* Mbox APIs */
0674 static inline int otx2_sync_mbox_msg(struct mbox *mbox)
0675 {
0676     int err;
0677 
0678     if (!otx2_mbox_nonempty(&mbox->mbox, 0))
0679         return 0;
0680     otx2_mbox_msg_send(&mbox->mbox, 0);
0681     err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
0682     if (err)
0683         return err;
0684 
0685     return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
0686 }
0687 
0688 static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
0689 {
0690     int err;
0691 
0692     if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
0693         return 0;
0694     otx2_mbox_msg_send(&mbox->mbox_up, devid);
0695     err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
0696     if (err)
0697         return err;
0698 
0699     return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
0700 }
0701 
0702 /* Use this API to send mbox msgs in atomic context
0703  * where sleeping is not allowed
0704  */
0705 static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
0706 {
0707     int err;
0708 
0709     if (!otx2_mbox_nonempty(&mbox->mbox, 0))
0710         return 0;
0711     otx2_mbox_msg_send(&mbox->mbox, 0);
0712     err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
0713     if (err)
0714         return err;
0715 
0716     return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
0717 }
0718 
/* Generate one otx2_mbox_alloc_msg_<name>() helper per entry in the
 * MBOX_MESSAGES table; each allocates and minimally initializes a
 * request of the corresponding type in the down mbox.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
static struct _req_type __maybe_unused                  \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)                    \
{                                   \
    struct _req_type *req;                      \
                                    \
    req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(      \
        &mbox->mbox, 0, sizeof(struct _req_type),       \
        sizeof(struct _rsp_type));              \
    if (!req)                           \
        return NULL;                        \
    req->hdr.sig = OTX2_MBOX_REQ_SIG;               \
    req->hdr.id = _id;                      \
    trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));   \
    return req;                         \
}

MBOX_MESSAGES
#undef M

/* Declare one up-mbox handler prototype per CGX up-message */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)           \
int                                 \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,        \
                struct _req_type *req,          \
                struct _rsp_type *rsp);         \

MBOX_UP_CGX_MESSAGES
#undef M

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT     (100 * HZ)

/* RVU PF_FUNC field layout: PF in bits 15..10, FUNC (VF id + 1) in
 * bits 9..0.
 */
#define RVU_PFVF_PF_SHIFT   10
#define RVU_PFVF_PF_MASK    0x3F
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK  0x3FF
0755 
0756 static inline bool is_otx2_vf(u16 pcifunc)
0757 {
0758     return !!(pcifunc & RVU_PFVF_FUNC_MASK);
0759 }
0760 
0761 static inline int rvu_get_pf(u16 pcifunc)
0762 {
0763     return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
0764 }
0765 
0766 static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
0767                        struct page *page,
0768                        size_t offset, size_t size,
0769                        enum dma_data_direction dir)
0770 {
0771     dma_addr_t iova;
0772 
0773     iova = dma_map_page_attrs(pfvf->dev, page,
0774                   offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
0775     if (unlikely(dma_mapping_error(pfvf->dev, iova)))
0776         return (dma_addr_t)NULL;
0777     return iova;
0778 }
0779 
/* Unmap a DMA mapping created by otx2_dma_map_page(); mirrors its
 * DMA_ATTR_SKIP_CPU_SYNC attribute.
 */
static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
                       dma_addr_t addr, size_t size,
                       enum dma_data_direction dir)
{
    dma_unmap_page_attrs(pfvf->dev, addr, size,
                 dir, DMA_ATTR_SKIP_CPU_SYNC);
}
0787 
/* ---- Exported function prototypes (implemented in other .c files) ---- */

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
              dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
              dma_addr_t *dma);

/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
                  struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
                   struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
                   struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
                  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
                struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
                struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
                struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
              struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
                 int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
          struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
               struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
          struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
                   netdev_features_t features);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
          void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support*/
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
#endif
#endif /* OTX2_COMMON_H */