// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

#define LMT_TBL_OP_READ		0
#define LMT_TBL_OP_WRITE	1
#define LMT_MAP_TABLE_SIZE	(128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE	16
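/* Read or write one 64-bit word of the LMTST map table. The table lives in
 * memory pointed to by APR_AF_LMT_MAP_BASE and is ioremapped here only for
 * the duration of the access.
 */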
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
{
	void __iomem *lmt_map_base;
	u64 tbl_base;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		return -ENOMEM;
	}

	if (lmt_tbl_op == LMT_TBL_OP_READ) {
		*val = readq(lmt_map_base + index);
	} else {
		writeq((*val), (lmt_map_base + index));

		/* Make the updated map entry take effect: write 1 to
		 * APR_AF_LMT_CTL to trigger a flush, read it back as a
		 * barrier, then write 0 to complete the flush sequence.
		 */
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
	}

	iounmap(lmt_map_base);
	return 0;
}

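/* Each LMT map table entry is LMT_MAPTBL_ENTRY_SIZE (16) bytes, so word1 of
 * an entry sits at byte offset 8 (LMT_MAP_TBL_W1_OFF). The byte index of a
 * PF/VF's entry is ((pf * total_vfs) + func) * 16; for illustration only,
 * with total_vfs = 128, pf = 2 and func = 3 the offset would be
 * (2 * 128 + 3) * 16 = 4144.
 */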
#define LMT_MAP_TBL_W1_OFF	8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}

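/* Translate a guest IOVA to a physical address by issuing a translation
 * request to the RVU SMMU widget on behalf of the given PF/VF and polling
 * for the response.
 */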
static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
			   u64 iova, u64 *lmt_addr)
{
	u64 pa, val, pf;
	int err;

	if (!iova) {
		dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
		return -EINVAL;
	}

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
	pf = rvu_get_pf(pcifunc) & 0x1F;
	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
	if (err) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
		return err;
	}
	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
	if (val & ~0x1ULL) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
		return -EIO;
	}

	/* PA[51:12] is taken from RVU_AF_SMMU_TLN_FLIT0[57:18],
	 * PA[11:0] is taken from the low 12 bits of the IOVA.
	 */
	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
	pa &= GENMASK_ULL(39, 0);
	*lmt_addr = (pa << 12) | (iova & 0xFFF);

	return 0;
}

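/* Program a new LMT base address for @pcifunc into the LMT map table, saving
 * the entry's previous value the first time so it can be restored later by
 * rvu_reset_lmt_map_tbl().
 */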
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err = 0;
	u64 val;

	/* Read the current LMT address of this pcifunc */
	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
	if (err) {
		dev_err(rvu->dev,
			"Failed to read LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}

	/* Store the original LMT base address so it can be reverted later.
	 * Only the first value read is saved, so repeated calls do not
	 * overwrite the default.
	 */
	if (!pfvf->lmt_base_addr)
		pfvf->lmt_base_addr = val;

	/* Update the LMT table with the new address */
	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
	if (err) {
		dev_err(rvu->dev,
			"Failed to update LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}
	return 0;
}

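/* Mailbox handler for LMTST table setup. Depending on the request this can
 * point the caller's LMT map entry at its own memory region, share the base
 * address of another (primary) pcifunc, and/or update word1 flags such as
 * scheduled LMTST, early-completion disable and LMTLINE prefetch disable.
 */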
int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
				     struct lmtst_tbl_setup_req *req,
				     struct msg_rsp *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	u32 pri_tbl_idx, tbl_idx;
	u64 lmt_addr;
	int err = 0;
	u64 val;

	/* Check if the PF/VF wants to use its own local memory as the
	 * LMTLINE region; if so, convert that IOVA to a physical address
	 * and populate the LMT table with it.
	 */
	if (req->use_local_lmt_region) {
		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
				      req->lmt_iova, &lmt_addr);
		if (err < 0)
			return err;

		/* Update the LMT address for this PF/VF in the LMT table */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
		if (err)
			return err;
	}

	/* Reconfigure the LMTST map table for shared-LMT mode, i.e. multiple
	 * PF/VFs sharing one LMTLINE region. The primary/base pcifunc passed
	 * in the request is the one whose LMT base address is shared with
	 * the secondary pcifunc (the caller of this mailbox).
	 */
	if (req->base_pcifunc) {
		/* Calculate the LMT table index of the primary pcifunc */
		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

		/* Read the base LMT address of the primary pcifunc */
		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
					  LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				pri_tbl_idx, err);
			goto error;
		}

		/* Update the secondary's base LMT address with the primary's
		 * base LMT address.
		 */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
		if (err)
			return err;
	}

	/* This mailbox can also update word1 of the LMT map entry, e.g. to
	 * enable scheduled LMTST, disable early completion for ordered LMTST
	 * or disable LMTLINE prefetch.
	 */
	if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}

		/* Store the default word1 value of the LMT map entry so it
		 * can be reverted later. Only the first value read is saved,
		 * so repeated calls to this mailbox do not overwrite it.
		 */
		if (!pfvf->lmt_map_ent_w1)
			pfvf->lmt_map_ent_w1 = val;

		/* Disable early completion for ordered LMTSTs */
		if (req->dis_sched_early_comp)
			val |= (req->dis_sched_early_comp <<
				APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
		/* Enable scheduled LMTST */
		if (req->sch_ena)
			val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
				req->ssow_pf_func;
		/* Disable LMTLINE prefetch */
		if (req->dis_line_pref)
			val |= (req->dis_line_pref <<
				APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);

		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_WRITE);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}
	}

error:
	return err;
}

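/* Restore the LMT map table entries of @pcifunc to the original values that
 * were saved before they were modified via the LMTST setup mailbox.
 */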
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err;

	if (is_rvu_otx2(rvu))
		return;

	if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
		/* This corresponds to the LMT map table index */
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);

		/* Revert to the original LMT base address of this pcifunc */
		if (pfvf->lmt_base_addr) {
			err = lmtst_map_table_ops(rvu, tbl_idx,
						  &pfvf->lmt_base_addr,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx, err);
			pfvf->lmt_base_addr = 0;
		}

		/* Revert to the original word1 value of the map table entry
		 * that was modified.
		 */
		if (pfvf->lmt_map_ent_w1) {
			err = lmtst_map_table_ops(rvu,
						  tbl_idx + LMT_MAP_TBL_W1_OFF,
						  &pfvf->lmt_map_ent_w1,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			pfvf->lmt_map_ent_w1 = 0;
		}
	}
}

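/* Compute per-link-type base channel numbers from NIX_AF_CONST/CONST1. Each
 * base is rounded up with ALIGN() to a multiple of that block's channel
 * count; e.g. (illustrative values only) one LBK link with 64 channels
 * starting at channel 0 gives sdp_chan_base = ALIGN(64, nr_sdp_chans).
 */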
int rvu_set_channels_base(struct rvu *rvu)
{
	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nix_const, nix_const1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;

	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present, lay the channel numbers out
	 * contiguously with no holes so that the new CPT channels can be
	 * accommodated. Channels are assigned in the order LBK, SDP, CGX and
	 * CPT, and the base channel number of a block must be a multiple of
	 * that block's number of channels.
	 */
	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
	nr_sdp_chans = nix_const1 & 0xFFFULL;
	nr_cgx_chans = nix_const & 0xFFULL;
	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
	/* Round up the base channel to a multiple of the channel count */
	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);

	/* Out of 4096 channels start CPT from 2048 so that the MSB of CPT
	 * channel numbers is always set.
	 */
	if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
		hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

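/* Program the channel base, range and ID of one LBK link config register
 * (X2P or P2X). LBK_CONNECT_NIXX() appears to encode which NIX block an LBK
 * side is wired to, based on how LBK_CONST's src/dst fields are compared
 * against it below.
 */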
#define LBK_CONNECT_NIXX(a)	(0x0 + (a))

static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* Looping packets back between multiple NIX blocks requires multiple
	 * LBK blocks. Walk every LBK PCI device, read its LBK_CONST register
	 * to find the channel count and the source/destination NIX, and
	 * program the X2P and P2X link config registers accordingly.
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}

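/* Program the base channel and channel range of every NIX link (CGX, LBK,
 * SDP and CPT) into the corresponding NIX_AF_LINKX_CFG register.
 */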
static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = nix_const1 & 0xFFFULL;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

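/* Program the base channel of one RPM/CGX LMAC. No constant register is read
 * for the per-LMAC channel count here; 16 channels per LMAC is assumed,
 * matching the ilog2(16) range programmed below and the "base += 16" stride
 * in rvu_rpm_set_channels().
 */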
static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

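/* Entry point: program NIX, LBK and RPM channel registers. This is a no-op
 * on silicon without the programmable-channels capability.
 */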
void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}