// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

/* RVU LMTST */
#define LMT_TBL_OP_READ     0
#define LMT_TBL_OP_WRITE    1
#define LMT_MAP_TABLE_SIZE  (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE   16
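
/* The 128KB map table holds (128 * 1024) / 16 = 8192 16-byte
 * APR_LMT_MAP_ENTRY_S entries; the 'index' passed to the table ops
 * below is a byte offset into this table, not an entry number.
 */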

/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
                   int lmt_tbl_op)
{
    void __iomem *lmt_map_base;
    u64 tbl_base;

    tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

    lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
    if (!lmt_map_base) {
        dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
        return -ENOMEM;
    }

    if (lmt_tbl_op == LMT_TBL_OP_READ) {
        *val = readq(lmt_map_base + index);
    } else {
        writeq((*val), (lmt_map_base + index));
        /* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
         * changes effective. Writing 1 triggers the flush; the read
         * back acts as a barrier and sets up a data dependency.
         * Writing 0 after the write of 1 completes the flush.
         */
        rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
        rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
        rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
    }

    iounmap(lmt_map_base);
    return 0;
}

#define LMT_MAP_TBL_W1_OFF  8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
    return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
        (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
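
/* Illustrative example (values assumed, not from hardware): with
 * hw->total_vfs = 64, a pcifunc whose PF field is 2 and FUNC field
 * is 3 maps to byte offset ((2 * 64) + 3) * 16 = 2096 in the table.
 */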

static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
               u64 iova, u64 *lmt_addr)
{
    u64 pa, val, pf;
    int err;

    if (!iova) {
        dev_err(rvu->dev, "%s Requested null address for translation\n", __func__);
        return -EINVAL;
    }

    rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
    pf = rvu_get_pf(pcifunc) & 0x1F;
    val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
          ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
    rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

    err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
    if (err) {
        dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
        return err;
    }
    val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
    if (val & ~0x1ULL) {
        dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
        return -EIO;
    }
    /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
     * PA[11:0] = IOVA[11:0]
     */
    pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
    pa &= GENMASK_ULL(39, 0);
    *lmt_addr = (pa << 12) | (iova & 0xFFF);

    return 0;
}
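
/* Worked example of the address stitching above (illustrative
 * numbers): if RVU_AF_SMMU_TLN_FLIT0 >> 18 yields pa = 0x123456 and
 * iova = 0x5000abc, then *lmt_addr = (0x123456 << 12) | 0xabc
 * = 0x123456abc, i.e. a 4KB-aligned PA with the page offset taken
 * straight from the IOVA.
 */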

static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
    struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    u32 tbl_idx;
    int err = 0;
    u64 val;

    /* Read the current lmt addr of pcifunc */
    tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
    err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
    if (err) {
        dev_err(rvu->dev,
            "Failed to read LMT map table: index 0x%x err %d\n",
            tbl_idx, err);
        return err;
    }

    /* Store the secondary's lmt base address as this needs to be
     * reverted in FLR. Also make sure this default value doesn't
     * get overwritten on multiple calls to this mailbox.
     */
    if (!pfvf->lmt_base_addr)
        pfvf->lmt_base_addr = val;

    /* Update the LMT table with the new addr */
    err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
    if (err) {
        dev_err(rvu->dev,
            "Failed to update LMT map table: index 0x%x err %d\n",
            tbl_idx, err);
        return err;
    }
    return 0;
}

int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
                     struct lmtst_tbl_setup_req *req,
                     struct msg_rsp *rsp)
{
    struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
    u32 pri_tbl_idx, tbl_idx;
    u64 lmt_addr;
    int err = 0;
    u64 val;

    /* Check if the PF_FUNC wants to use its own local memory as the
     * LMTLINE region; if so, convert that IOVA to a physical address
     * and populate the LMT table with that address.
     */
    if (req->use_local_lmt_region) {
        err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
                      req->lmt_iova, &lmt_addr);
        if (err < 0)
            return err;

        /* Update the lmt addr for this PF_FUNC in the LMT table */
        err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
        if (err)
            return err;
    }

    /* Reconfigure the lmtst map table for LMT region shared mode,
     * i.e. make multiple PF_FUNCs share one LMTLINE region. The
     * primary/base pcifunc (passed as an argument to the mailbox)
     * is the one whose lmt base address will be shared with the
     * secondary pcifunc (the one calling this mailbox).
     */
    if (req->base_pcifunc) {
        /* Calculate the LMT table index corresponding to the
         * primary pcifunc.
         */
        pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

        /* Read the base lmt addr of the primary pcifunc */
        err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
                      LMT_TBL_OP_READ);
        if (err) {
            dev_err(rvu->dev,
                "Failed to read LMT map table: index 0x%x err %d\n",
                pri_tbl_idx, err);
            goto error;
        }

        /* Update the base lmt addr of the secondary with the
         * primary's base lmt addr.
         */
        err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
        if (err)
            return err;
    }

    /* This mailbox can also be used to update word1 of
     * APR_LMT_MAP_ENTRY_S, e.g. to enable scheduled LMTST, disable
     * LMTLINE prefetch, or disable early completion for ordered
     * LMTST.
     */
    if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
        tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
        err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
                      &val, LMT_TBL_OP_READ);
        if (err) {
            dev_err(rvu->dev,
                "Failed to read LMT map table: index 0x%x err %d\n",
                tbl_idx + LMT_MAP_TBL_W1_OFF, err);
            goto error;
        }

        /* Store the lmt map table entry word1 default value as this
         * needs to be reverted in FLR. Also make sure this default
         * value doesn't get overwritten on multiple calls to this
         * mailbox.
         */
        if (!pfvf->lmt_map_ent_w1)
            pfvf->lmt_map_ent_w1 = val;

        /* Disable early completion for ordered LMTSTs. */
        if (req->dis_sched_early_comp)
            val |= (req->dis_sched_early_comp <<
                APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
        /* Enable scheduled LMTST */
        if (req->sch_ena)
            val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
                req->ssow_pf_func;
        /* Disable LMTLINE prefetch before receiving store data. */
        if (req->dis_line_pref)
            val |= (req->dis_line_pref <<
                APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);

        err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
                      &val, LMT_TBL_OP_WRITE);
        if (err) {
            dev_err(rvu->dev,
                "Failed to update LMT map table: index 0x%x err %d\n",
                tbl_idx + LMT_MAP_TBL_W1_OFF, err);
            goto error;
        }
    }

error:
    return err;
}
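
/* For instance, a request with only dis_line_pref set reads the
 * entry's word1 at byte offset tbl_idx + 8 (the second 64-bit word
 * of the 16-byte entry), ORs in the prefetch-disable bit, and writes
 * it back; the pristine word1 is cached in pfvf so FLR can restore
 * it later.
 */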

/* Reset the lmtst map table to the original base addresses */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
    struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    u32 tbl_idx;
    int err;

    if (is_rvu_otx2(rvu))
        return;

    if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
        /* This corresponds to the lmt map table index */
        tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
        /* Revert to the original lmt base addr of the respective
         * pcifunc.
         */
        if (pfvf->lmt_base_addr) {
            err = lmtst_map_table_ops(rvu, tbl_idx,
                          &pfvf->lmt_base_addr,
                          LMT_TBL_OP_WRITE);
            if (err)
                dev_err(rvu->dev,
                    "Failed to update LMT map table: index 0x%x err %d\n",
                    tbl_idx, err);
            pfvf->lmt_base_addr = 0;
        }
        /* Revert to the original word1 value of the lmtst map table
         * entry which underwent changes.
         */
        if (pfvf->lmt_map_ent_w1) {
            err = lmtst_map_table_ops(rvu,
                          tbl_idx + LMT_MAP_TBL_W1_OFF,
                          &pfvf->lmt_map_ent_w1,
                          LMT_TBL_OP_WRITE);
            if (err)
                dev_err(rvu->dev,
                    "Failed to update LMT map table: index 0x%x err %d\n",
                    tbl_idx + LMT_MAP_TBL_W1_OFF, err);
            pfvf->lmt_map_ent_w1 = 0;
        }
    }
}

int rvu_set_channels_base(struct rvu *rvu)
{
    u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
    u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
    struct rvu_hwinfo *hw = rvu->hw;
    u64 nix_const, nix_const1;
    int blkaddr;

    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
    if (blkaddr < 0)
        return blkaddr;

    nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
    nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

    hw->cgx = (nix_const >> 12) & 0xFULL;
    hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
    hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
    hw->lbk_links = (nix_const >> 24) & 0xFULL;
    hw->cpt_links = (nix_const >> 44) & 0xFULL;
    hw->sdp_links = 1;

    hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
    hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
    hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

    /* No programmable channels */
    if (!(nix_const & BIT_ULL(60)))
        return 0;

    hw->cap.programmable_chans = true;

    /* If programmable channels are present then configure the
     * channels such that all channel numbers are contiguous,
     * leaving no holes. This way the new CPT channels can be
     * accommodated. The order of channel numbers assigned is
     * LBK, SDP, CGX and CPT. Also the base channel number of a
     * block must be a multiple of the number of channels of the
     * block.
     */
    nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
    nr_sdp_chans = nix_const1 & 0xFFFULL;
    nr_cgx_chans = nix_const & 0xFFULL;
    nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

    sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
    /* Round up the base channel to a multiple of the number of channels */
    hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

    cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
    hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

    cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
    hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);

    /* Out of 4096 channels, start CPT from 2048 so that the MSB of
     * CPT channel numbers is always set.
     */
    if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
        hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
    } else {
        dev_err(rvu->dev,
            "CPT channels could not fit in the range 2048-4095\n");
        return -EINVAL;
    }

    return 0;
}
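
/* Illustrative layout (counts assumed, not from hardware): with
 * lbk_chan_base = 0, 1 LBK link of 64 channels, 1 SDP link of 128
 * channels, 3 CGX links of 16 channels each and nr_cpt_chans = 64:
 *   sdp_chan_base = ALIGN(0 + 1 * 64, 128)   = 128
 *   cgx_chan_base = ALIGN(128 + 1 * 128, 16) = 256
 *   cpt_chan_base = ALIGN(256 + 3 * 16, 64)  = 320
 * 320 <= 2048, so CPT is then pinned to NIX_CHAN_CPT_CH_START (2048).
 */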

#define LBK_CONNECT_NIXX(a)     (0x0 + (a))

static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
                u64 offset, int lbkid, u16 chans)
{
    struct rvu_hwinfo *hw = rvu->hw;
    u64 cfg;

    cfg = readq(base + offset);
    cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
         LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
    cfg |=  FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
    cfg |=  FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
    cfg |=  FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

    writeq(cfg, base + offset);
}
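
/* The RANGE field encodes a power of two: e.g. with chans = 64 it is
 * programmed as ilog2(64) = 6, meaning the link spans 2^6 channels
 * starting at lbk_chan_base.
 */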

static void rvu_lbk_set_channels(struct rvu *rvu)
{
    struct pci_dev *pdev = NULL;
    void __iomem *base;
    u64 lbk_const;
    u8 src, dst;
    u16 chans;

    /* To loopback packets between multiple NIX blocks,
     * multiple LBK blocks are needed. With two NIX blocks,
     * four LBK blocks are needed and each LBK block's
     * source and destination are as follows:
     * LBK0 - source NIX0 and destination NIX0
     * LBK1 - source NIX0 and destination NIX1
     * LBK2 - source NIX1 and destination NIX0
     * LBK3 - source NIX1 and destination NIX1
     * As per the HRM, channel numbers should be programmed as:
     * P2X and X2P of LBK0 as same
     * P2X and X2P of LBK3 as same
     * P2X of LBK1 and X2P of LBK2 as same
     * P2X of LBK2 and X2P of LBK1 as same
     */
    while (true) {
        pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
                      PCI_DEVID_OCTEONTX2_LBK, pdev);
        if (!pdev)
            return;

        base = pci_ioremap_bar(pdev, 0);
        if (!base)
            goto err_put;

        lbk_const = readq(base + LBK_CONST);
        chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
        dst = FIELD_GET(LBK_CONST_DST, lbk_const);
        src = FIELD_GET(LBK_CONST_SRC, lbk_const);

        if (src == dst) {
            if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
                            0, chans);
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
                            0, chans);
            } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
                            1, chans);
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
                            1, chans);
            }
        } else {
            if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
                            0, chans);
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
                            1, chans);
            } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
                            1, chans);
                __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
                            0, chans);
            }
        }
        iounmap(base);
    }
err_put:
    pci_dev_put(pdev);
}
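
/* Note on the iteration above: pci_get_device() drops the reference
 * on the 'from' device and takes one on the device it returns, so
 * the loop walks every LBK PCI function without leaking references;
 * the err_put label only releases the reference held on the device
 * whose BAR mapping failed.
 */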

static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
    u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
    u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
    u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
    struct rvu_hwinfo *hw = rvu->hw;
    int link, nix_link = 0;
    u16 start;
    u64 cfg;

    cgx_chans = nix_const & 0xFFULL;
    lbk_chans = (nix_const >> 16) & 0xFFULL;
    sdp_chans = nix_const1 & 0xFFFULL;
    cpt_chans = (nix_const >> 32) & 0xFFFULL;

    start = hw->cgx_chan_base;
    for (link = 0; link < hw->cgx_links; link++, nix_link++) {
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
        cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
        cfg |=  FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
        cfg |=  FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
        rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
        start += cgx_chans;
    }

    start = hw->lbk_chan_base;
    for (link = 0; link < hw->lbk_links; link++, nix_link++) {
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
        cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
        cfg |=  FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
        cfg |=  FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
        rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
        start += lbk_chans;
    }

    start = hw->sdp_chan_base;
    for (link = 0; link < hw->sdp_links; link++, nix_link++) {
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
        cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
        cfg |=  FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
        cfg |=  FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
        rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
        start += sdp_chans;
    }

    start = hw->cpt_chan_base;
    for (link = 0; link < hw->cpt_links; link++, nix_link++) {
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
        cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
        cfg |=  FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
        cfg |=  FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
        rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
        start += cpt_chans;
    }
}
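
/* nix_link accumulates across the four loops above, so the
 * NIX_AF_LINKX_CFG registers are indexed with CGX links first, then
 * LBK, SDP and CPT. For example (counts assumed), with 8 CGX links,
 * 4 LBK links and 1 SDP link, the first CPT link is programmed at
 * NIX_AF_LINKX_CFG(13).
 */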

static void rvu_nix_set_channels(struct rvu *rvu)
{
    int blkaddr = 0;

    blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
    while (blkaddr) {
        __rvu_nix_set_channels(rvu, blkaddr);
        blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
    }
}

static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
    u64 cfg;

    cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
    cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

    /* There is no read-only constant register reporting the number
     * of channels per LMAC; it is always 16.
     */
    cfg |=  FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
    cfg |=  FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
    cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

static void rvu_rpm_set_channels(struct rvu *rvu)
{
    struct rvu_hwinfo *hw = rvu->hw;
    u16 base = hw->cgx_chan_base;
    int cgx, lmac;

    for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
        for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
            __rvu_rpm_set_channels(cgx, lmac, base);
            base += 16;
        }
    }
}
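
/* For example (assuming lmac_per_cgx = 4): the LMACs of CGX0 get
 * channel bases base + 0, +16, +32 and +48, and CGX1's first LMAC
 * continues at base + 64, matching the contiguous layout chosen in
 * rvu_set_channels_base().
 */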

void rvu_program_channels(struct rvu *rvu)
{
    struct rvu_hwinfo *hw = rvu->hw;

    if (!hw->cap.programmable_chans)
        return;

    rvu_nix_set_channels(rvu);
    rvu_lbk_set_channels(rvu);
    rvu_rpm_set_channels(rvu);
}