Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Marvell RVU Admin Function driver
0003  *
0004  * Copyright (C) 2019 Marvell.
0005  *
0006  */
0007 
0008 #ifdef CONFIG_DEBUG_FS
0009 
0010 #include <linux/fs.h>
0011 #include <linux/debugfs.h>
0012 #include <linux/module.h>
0013 #include <linux/pci.h>
0014 
0015 #include "rvu_struct.h"
0016 #include "rvu_reg.h"
0017 #include "rvu.h"
0018 #include "cgx.h"
0019 #include "lmac_common.h"
0020 #include "npc.h"
0021 #include "rvu_npc_hash.h"
0022 
0023 #define DEBUGFS_DIR_NAME "octeontx2"
0024 
/* Indices into the cgx_rx_stats_fields[] / cgx_tx_stats_fields[] name
 * tables below.  RX uses CGX_STAT0..CGX_STAT12, TX uses
 * CGX_STAT0..CGX_STAT17; the remaining values are spare.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
0046 
/* NIX TX stats: per-LF transmit statistic indices.
 * NOTE(review): values presumably match the hardware TX stat register
 * layout — confirm against the NIX register spec before reordering.
 */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};
0056 
/* NIX RX stats: per-LF receive statistic indices.
 * NOTE(review): values presumably match the hardware RX stat register
 * layout — confirm against the NIX register spec before reordering.
 */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};
0073 
/* Human-readable names for CGX RX statistics, indexed by CGX_STATn.
 * Only CGX_STAT0..CGX_STAT12 are valid RX stats.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
0089 
/* Human-readable names for CGX TX statistics, indexed by CGX_STATn.
 * Only CGX_STAT0..CGX_STAT17 are valid TX stats.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
0110 
/* Human-readable names for RPM MAC RX statistics, in counter order.
 * Fixes two user-visible typos in the debugfs output:
 *   "with out error"  -> "without error"
 *   "a1nrange length" -> "in-range length" (aInRangeLengthErrors MIB)
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
0156 
/* Human-readable names for RPM MAC TX statistics, in counter order. */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
0193 
/* CPT crypto engine types.  AE/SE/IE — presumably asymmetric, symmetric
 * and inline-IPsec engines; values start at 1 (0 is not a valid type).
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
0199 
/* NDC bank count: low byte of the given NDC block's NDC_AF_CONST register. */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
					blk_addr, NDC_AF_CONST) & 0xFF)

/* Expansion targets that let the FOPS macros below accept "NULL" as a
 * handler name: rvu_dbg_##NULL / rvu_dbg_open_##NULL resolve to these.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare seq_file-backed debugfs file_operations: reads go through
 * single_open()/seq_read(), writes (if any) through rvu_dbg_##write_op.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare raw (non-seq_file) debugfs file_operations using simple_open(). */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
0227 
0228 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
0229 
0230 #define LMT_MAPTBL_ENTRY_SIZE 16
0231 /* Dump LMTST map table */
0232 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
0233                            char __user *buffer,
0234                            size_t count, loff_t *ppos)
0235 {
0236     struct rvu *rvu = filp->private_data;
0237     u64 lmt_addr, val, tbl_base;
0238     int pf, vf, num_vfs, hw_vfs;
0239     void __iomem *lmt_map_base;
0240     int buf_size = 10240;
0241     size_t off = 0;
0242     int index = 0;
0243     char *buf;
0244     int ret;
0245 
0246     /* don't allow partial reads */
0247     if (*ppos != 0)
0248         return 0;
0249 
0250     buf = kzalloc(buf_size, GFP_KERNEL);
0251     if (!buf)
0252         return -ENOMEM;
0253 
0254     tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
0255 
0256     lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
0257     if (!lmt_map_base) {
0258         dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
0259         kfree(buf);
0260         return false;
0261     }
0262 
0263     off +=  scnprintf(&buf[off], buf_size - 1 - off,
0264               "\n\t\t\t\t\tLmtst Map Table Entries");
0265     off +=  scnprintf(&buf[off], buf_size - 1 - off,
0266               "\n\t\t\t\t\t=======================");
0267     off +=  scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
0268     off +=  scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
0269     off +=  scnprintf(&buf[off], buf_size - 1 - off,
0270               "Lmtline Base (word 0)\t\t");
0271     off +=  scnprintf(&buf[off], buf_size - 1 - off,
0272               "Lmt Map Entry (word 1)");
0273     off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
0274     for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
0275         off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
0276                     pf);
0277 
0278         index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
0279         off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
0280                  (tbl_base + index));
0281         lmt_addr = readq(lmt_map_base + index);
0282         off += scnprintf(&buf[off], buf_size - 1 - off,
0283                  " 0x%016llx\t\t", lmt_addr);
0284         index += 8;
0285         val = readq(lmt_map_base + index);
0286         off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
0287                  val);
0288         /* Reading num of VFs per PF */
0289         rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
0290         for (vf = 0; vf < num_vfs; vf++) {
0291             index = (pf * rvu->hw->total_vfs * 16) +
0292                 ((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
0293             off += scnprintf(&buf[off], buf_size - 1 - off,
0294                         "PF%d:VF%d  \t\t", pf, vf);
0295             off += scnprintf(&buf[off], buf_size - 1 - off,
0296                      " 0x%llx\t\t", (tbl_base + index));
0297             lmt_addr = readq(lmt_map_base + index);
0298             off += scnprintf(&buf[off], buf_size - 1 - off,
0299                      " 0x%016llx\t\t", lmt_addr);
0300             index += 8;
0301             val = readq(lmt_map_base + index);
0302             off += scnprintf(&buf[off], buf_size - 1 - off,
0303                      " 0x%016llx\n", val);
0304         }
0305     }
0306     off +=  scnprintf(&buf[off], buf_size - 1 - off, "\n");
0307 
0308     ret = min(off, count);
0309     if (copy_to_user(buffer, buf, ret))
0310         ret = -EFAULT;
0311     kfree(buf);
0312 
0313     iounmap(lmt_map_base);
0314     if (ret < 0)
0315         return ret;
0316 
0317     *ppos = ret;
0318     return ret;
0319 }
0320 
0321 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
0322 
/* Render the LFs of @block that are attached to @pcifunc into @lfs as a
 * comma-separated string, collapsing consecutive LF numbers into
 * "first-last" ranges (e.g. "0-3,7").
 *
 * NOTE(review): assumes @lfs is large enough for the worst case — callers
 * size it via get_max_column_width(); verify when adding new callers.
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out of range so the first match never extends a run */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* extends the current consecutive run; emitted later */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* close the open run, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* close a run that was still open when the bitmap ended */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
0356 
0357 static int get_max_column_width(struct rvu *rvu)
0358 {
0359     int index, pf, vf, lf_str_size = 12, buf_size = 256;
0360     struct rvu_block block;
0361     u16 pcifunc;
0362     char *buf;
0363 
0364     buf = kzalloc(buf_size, GFP_KERNEL);
0365     if (!buf)
0366         return -ENOMEM;
0367 
0368     for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
0369         for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
0370             pcifunc = pf << 10 | vf;
0371             if (!pcifunc)
0372                 continue;
0373 
0374             for (index = 0; index < BLK_COUNT; index++) {
0375                 block = rvu->hw->block[index];
0376                 if (!strlen(block.name))
0377                     continue;
0378 
0379                 get_lf_str_list(block, pcifunc, buf);
0380                 if (lf_str_size <= strlen(buf))
0381                     lf_str_size = strlen(buf) + 1;
0382             }
0383         }
0384     }
0385 
0386     kfree(buf);
0387     return lf_str_size;
0388 }
0389 
0390 /* Dumps current provisioning status of all RVU block LFs */
0391 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
0392                       char __user *buffer,
0393                       size_t count, loff_t *ppos)
0394 {
0395     int index, off = 0, flag = 0, len = 0, i = 0;
0396     struct rvu *rvu = filp->private_data;
0397     int bytes_not_copied = 0;
0398     struct rvu_block block;
0399     int pf, vf, pcifunc;
0400     int buf_size = 2048;
0401     int lf_str_size;
0402     char *lfs;
0403     char *buf;
0404 
0405     /* don't allow partial reads */
0406     if (*ppos != 0)
0407         return 0;
0408 
0409     buf = kzalloc(buf_size, GFP_KERNEL);
0410     if (!buf)
0411         return -ENOMEM;
0412 
0413     /* Get the maximum width of a column */
0414     lf_str_size = get_max_column_width(rvu);
0415 
0416     lfs = kzalloc(lf_str_size, GFP_KERNEL);
0417     if (!lfs) {
0418         kfree(buf);
0419         return -ENOMEM;
0420     }
0421     off +=  scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
0422               "pcifunc");
0423     for (index = 0; index < BLK_COUNT; index++)
0424         if (strlen(rvu->hw->block[index].name)) {
0425             off += scnprintf(&buf[off], buf_size - 1 - off,
0426                      "%-*s", lf_str_size,
0427                      rvu->hw->block[index].name);
0428         }
0429 
0430     off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
0431     bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
0432     if (bytes_not_copied)
0433         goto out;
0434 
0435     i++;
0436     *ppos += off;
0437     for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
0438         for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
0439             off = 0;
0440             flag = 0;
0441             pcifunc = pf << 10 | vf;
0442             if (!pcifunc)
0443                 continue;
0444 
0445             if (vf) {
0446                 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
0447                 off = scnprintf(&buf[off],
0448                         buf_size - 1 - off,
0449                         "%-*s", lf_str_size, lfs);
0450             } else {
0451                 sprintf(lfs, "PF%d", pf);
0452                 off = scnprintf(&buf[off],
0453                         buf_size - 1 - off,
0454                         "%-*s", lf_str_size, lfs);
0455             }
0456 
0457             for (index = 0; index < BLK_COUNT; index++) {
0458                 block = rvu->hw->block[index];
0459                 if (!strlen(block.name))
0460                     continue;
0461                 len = 0;
0462                 lfs[len] = '\0';
0463                 get_lf_str_list(block, pcifunc, lfs);
0464                 if (strlen(lfs))
0465                     flag = 1;
0466 
0467                 off += scnprintf(&buf[off], buf_size - 1 - off,
0468                          "%-*s", lf_str_size, lfs);
0469             }
0470             if (flag) {
0471                 off +=  scnprintf(&buf[off],
0472                           buf_size - 1 - off, "\n");
0473                 bytes_not_copied = copy_to_user(buffer +
0474                                 (i * off),
0475                                 buf, off);
0476                 if (bytes_not_copied)
0477                     goto out;
0478 
0479                 i++;
0480                 *ppos += off;
0481             }
0482         }
0483     }
0484 
0485 out:
0486     kfree(lfs);
0487     kfree(buf);
0488     if (bytes_not_copied)
0489         return -EFAULT;
0490 
0491     return *ppos;
0492 }
0493 
0494 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
0495 
0496 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
0497 {
0498     struct rvu *rvu = filp->private;
0499     struct pci_dev *pdev = NULL;
0500     struct mac_ops *mac_ops;
0501     char cgx[10], lmac[10];
0502     struct rvu_pfvf *pfvf;
0503     int pf, domain, blkid;
0504     u8 cgx_id, lmac_id;
0505     u16 pcifunc;
0506 
0507     domain = 2;
0508     mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
0509     /* There can be no CGX devices at all */
0510     if (!mac_ops)
0511         return 0;
0512     seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
0513            mac_ops->name);
0514     for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
0515         if (!is_pf_cgxmapped(rvu, pf))
0516             continue;
0517 
0518         pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
0519         if (!pdev)
0520             continue;
0521 
0522         cgx[0] = 0;
0523         lmac[0] = 0;
0524         pcifunc = pf << 10;
0525         pfvf = rvu_get_pfvf(rvu, pcifunc);
0526 
0527         if (pfvf->nix_blkaddr == BLKADDR_NIX0)
0528             blkid = 0;
0529         else
0530             blkid = 1;
0531 
0532         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
0533                     &lmac_id);
0534         sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
0535         sprintf(lmac, "LMAC%d", lmac_id);
0536         seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
0537                dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
0538     }
0539     return 0;
0540 }
0541 
0542 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
0543 
0544 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
0545                 u16 *pcifunc)
0546 {
0547     struct rvu_block *block;
0548     struct rvu_hwinfo *hw;
0549 
0550     hw = rvu->hw;
0551     block = &hw->block[blkaddr];
0552 
0553     if (lf < 0 || lf >= block->lf.max) {
0554         dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
0555              block->lf.max - 1);
0556         return false;
0557     }
0558 
0559     *pcifunc = block->fn_map[lf];
0560     if (!*pcifunc) {
0561         dev_warn(rvu->dev,
0562              "This LF is not attached to any RVU PFFUNC\n");
0563         return false;
0564     }
0565     return true;
0566 }
0567 
0568 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
0569 {
0570     char *buf;
0571 
0572     buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
0573     if (!buf)
0574         return;
0575 
0576     if (!pfvf->aura_ctx) {
0577         seq_puts(m, "Aura context is not initialized\n");
0578     } else {
0579         bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
0580                     pfvf->aura_ctx->qsize);
0581         seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
0582         seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
0583     }
0584 
0585     if (!pfvf->pool_ctx) {
0586         seq_puts(m, "Pool context is not initialized\n");
0587     } else {
0588         bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
0589                     pfvf->pool_ctx->qsize);
0590         seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
0591         seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
0592     }
0593     kfree(buf);
0594 }
0595 
0596 /* The 'qsize' entry dumps current Aura/Pool context Qsize
0597  * and each context's current enable/disable status in a bitmap.
0598  */
0599 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
0600                  int blktype)
0601 {
0602     void (*print_qsize)(struct seq_file *filp,
0603                 struct rvu_pfvf *pfvf) = NULL;
0604     struct dentry *current_dir;
0605     struct rvu_pfvf *pfvf;
0606     struct rvu *rvu;
0607     int qsize_id;
0608     u16 pcifunc;
0609     int blkaddr;
0610 
0611     rvu = filp->private;
0612     switch (blktype) {
0613     case BLKTYPE_NPA:
0614         qsize_id = rvu->rvu_dbg.npa_qsize_id;
0615         print_qsize = print_npa_qsize;
0616         break;
0617 
0618     case BLKTYPE_NIX:
0619         qsize_id = rvu->rvu_dbg.nix_qsize_id;
0620         print_qsize = print_nix_qsize;
0621         break;
0622 
0623     default:
0624         return -EINVAL;
0625     }
0626 
0627     if (blktype == BLKTYPE_NPA) {
0628         blkaddr = BLKADDR_NPA;
0629     } else {
0630         current_dir = filp->file->f_path.dentry->d_parent;
0631         blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
0632                    BLKADDR_NIX1 : BLKADDR_NIX0);
0633     }
0634 
0635     if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
0636         return -EINVAL;
0637 
0638     pfvf = rvu_get_pfvf(rvu, pcifunc);
0639     print_qsize(filp, pfvf);
0640 
0641     return 0;
0642 }
0643 
0644 static ssize_t rvu_dbg_qsize_write(struct file *filp,
0645                    const char __user *buffer, size_t count,
0646                    loff_t *ppos, int blktype)
0647 {
0648     char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
0649     struct seq_file *seqfile = filp->private_data;
0650     char *cmd_buf, *cmd_buf_tmp, *subtoken;
0651     struct rvu *rvu = seqfile->private;
0652     struct dentry *current_dir;
0653     int blkaddr;
0654     u16 pcifunc;
0655     int ret, lf;
0656 
0657     cmd_buf = memdup_user(buffer, count + 1);
0658     if (IS_ERR(cmd_buf))
0659         return -ENOMEM;
0660 
0661     cmd_buf[count] = '\0';
0662 
0663     cmd_buf_tmp = strchr(cmd_buf, '\n');
0664     if (cmd_buf_tmp) {
0665         *cmd_buf_tmp = '\0';
0666         count = cmd_buf_tmp - cmd_buf + 1;
0667     }
0668 
0669     cmd_buf_tmp = cmd_buf;
0670     subtoken = strsep(&cmd_buf, " ");
0671     ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
0672     if (cmd_buf)
0673         ret = -EINVAL;
0674 
0675     if (ret < 0 || !strncmp(subtoken, "help", 4)) {
0676         dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
0677         goto qsize_write_done;
0678     }
0679 
0680     if (blktype == BLKTYPE_NPA) {
0681         blkaddr = BLKADDR_NPA;
0682     } else {
0683         current_dir = filp->f_path.dentry->d_parent;
0684         blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
0685                    BLKADDR_NIX1 : BLKADDR_NIX0);
0686     }
0687 
0688     if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
0689         ret = -EINVAL;
0690         goto qsize_write_done;
0691     }
0692     if (blktype  == BLKTYPE_NPA)
0693         rvu->rvu_dbg.npa_qsize_id = lf;
0694     else
0695         rvu->rvu_dbg.nix_qsize_id = lf;
0696 
0697 qsize_write_done:
0698     kfree(cmd_buf_tmp);
0699     return ret ? ret : count;
0700 }
0701 
0702 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
0703                        const char __user *buffer,
0704                        size_t count, loff_t *ppos)
0705 {
0706     return rvu_dbg_qsize_write(filp, buffer, count, ppos,
0707                         BLKTYPE_NPA);
0708 }
0709 
0710 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
0711 {
0712     return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
0713 }
0714 
0715 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
0716 
/* Dumps given NPA Aura's context (from an AQ read response) to the
 * seq_file.  The "Wn:" prefixes name the word of the hardware aura
 * context structure (struct npa_aura_s) each field lives in.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be exists only on non-OTx2 silicon (presumably CN10K+) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst exists only on non-OTx2 silicon (presumably CN10K+) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
0764 
/* Dumps given NPA Pool's context (from an AQ read response) to the
 * seq_file.  The "Wn:" prefixes name the word of the hardware pool
 * context structure (struct npa_pool_s) each field lives in.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be exists only on non-OTx2 silicon (presumably CN10K+) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst exists only on non-OTx2 silicon (presumably CN10K+) */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
0811 
/* Reads aura/pool's ctx from admin queue and dumps it.  The NPA LF, the
 * context id and the "dump all" flag were previously stashed in
 * rvu->rvu_dbg by the matching debugfs write handler; with "all" set,
 * every context from 0 to qsize-1 is dumped, otherwise just the one id.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an AQ READ instruction on behalf of the owning pcifunc */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" widens the [id, max_id) loop to the whole qsize range */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
0892 
0893 static int write_npa_ctx(struct rvu *rvu, bool all,
0894              int npalf, int id, int ctype)
0895 {
0896     struct rvu_pfvf *pfvf;
0897     int max_id = 0;
0898     u16 pcifunc;
0899 
0900     if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
0901         return -EINVAL;
0902 
0903     pfvf = rvu_get_pfvf(rvu, pcifunc);
0904 
0905     if (ctype == NPA_AQ_CTYPE_AURA) {
0906         if (!pfvf->aura_ctx) {
0907             dev_warn(rvu->dev, "Aura context is not initialized\n");
0908             return -EINVAL;
0909         }
0910         max_id = pfvf->aura_ctx->qsize;
0911     } else if (ctype == NPA_AQ_CTYPE_POOL) {
0912         if (!pfvf->pool_ctx) {
0913             dev_warn(rvu->dev, "Pool context is not initialized\n");
0914             return -EINVAL;
0915         }
0916         max_id = pfvf->pool_ctx->qsize;
0917     }
0918 
0919     if (id < 0 || id >= max_id) {
0920         dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
0921              (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
0922             max_id - 1);
0923         return -EINVAL;
0924     }
0925 
0926     switch (ctype) {
0927     case NPA_AQ_CTYPE_AURA:
0928         rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
0929         rvu->rvu_dbg.npa_aura_ctx.id = id;
0930         rvu->rvu_dbg.npa_aura_ctx.all = all;
0931         break;
0932 
0933     case NPA_AQ_CTYPE_POOL:
0934         rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
0935         rvu->rvu_dbg.npa_pool_ctx.id = id;
0936         rvu->rvu_dbg.npa_pool_ctx.all = all;
0937         break;
0938     default:
0939         return -EINVAL;
0940     }
0941     return 0;
0942 }
0943 
/* Parse a debugfs command of the form "<npalf> <id>|all".
 *
 * Copies *count bytes from userspace into @cmd_buf (caller allocated
 * *count + 1 bytes), terminates at the first newline, then tokenizes.
 * The first token (LF number) and a second token (numeric id or the
 * literal "all") are both required; any trailing token is rejected.
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL (or the
 * kstrtoint() error) on malformed input.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
                const char __user *buffer, int *npalf,
                int *id, bool *all)
{
    int bytes_not_copied;
    char *cmd_buf_tmp;
    char *subtoken;
    int ret;

    bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
    if (bytes_not_copied)
        return -EFAULT;

    /* NUL-terminate and clip the command at the first newline */
    cmd_buf[*count] = '\0';
    cmd_buf_tmp = strchr(cmd_buf, '\n');

    if (cmd_buf_tmp) {
        *cmd_buf_tmp = '\0';
        *count = cmd_buf_tmp - cmd_buf + 1;
    }

    /* First token: the NPA LF number (mandatory) */
    subtoken = strsep(&cmd_buf, " ");
    ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
    if (ret < 0)
        return ret;
    /* Second token: either the literal "all" or a numeric id */
    subtoken = strsep(&cmd_buf, " ");
    if (subtoken && strcmp(subtoken, "all") == 0) {
        *all = true;
    } else {
        ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
        if (ret < 0)
            return ret;
    }
    /* Anything left over means extra tokens -> reject */
    if (cmd_buf)
        return -EINVAL;
    return ret;
}
0981 
0982 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
0983                      const char __user *buffer,
0984                      size_t count, loff_t *ppos, int ctype)
0985 {
0986     char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
0987                     "aura" : "pool";
0988     struct seq_file *seqfp = filp->private_data;
0989     struct rvu *rvu = seqfp->private;
0990     int npalf, id = 0, ret;
0991     bool all = false;
0992 
0993     if ((*ppos != 0) || !count)
0994         return -EINVAL;
0995 
0996     cmd_buf = kzalloc(count + 1, GFP_KERNEL);
0997     if (!cmd_buf)
0998         return count;
0999     ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1000                    &npalf, &id, &all);
1001     if (ret < 0) {
1002         dev_info(rvu->dev,
1003              "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1004              ctype_string, ctype_string);
1005         goto done;
1006     } else {
1007         ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1008     }
1009 done:
1010     kfree(cmd_buf);
1011     return ret ? ret : count;
1012 }
1013 
/* "aura"-flavoured wrappers feeding the shared NPA ctx handlers */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
                      const char __user *buffer,
                      size_t count, loff_t *ppos)
{
    return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
                     NPA_AQ_CTYPE_AURA);
}

static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
    return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1028 
/* "pool"-flavoured wrappers feeding the shared NPA ctx handlers */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
                      const char __user *buffer,
                      size_t count, loff_t *ppos)
{
    return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
                     NPA_AQ_CTYPE_POOL);
}

static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
    return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1043 
1044 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1045                 int ctype, int transaction)
1046 {
1047     u64 req, out_req, lat, cant_alloc;
1048     struct nix_hw *nix_hw;
1049     struct rvu *rvu;
1050     int port;
1051 
1052     if (blk_addr == BLKADDR_NDC_NPA0) {
1053         rvu = s->private;
1054     } else {
1055         nix_hw = s->private;
1056         rvu = nix_hw->rvu;
1057     }
1058 
1059     for (port = 0; port < NDC_MAX_PORT; port++) {
1060         req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1061                         (port, ctype, transaction));
1062         lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1063                         (port, ctype, transaction));
1064         out_req = rvu_read64(rvu, blk_addr,
1065                      NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1066                      (port, ctype, transaction));
1067         cant_alloc = rvu_read64(rvu, blk_addr,
1068                     NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1069                     (port, transaction));
1070         seq_printf(s, "\nPort:%d\n", port);
1071         seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1072         seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1073         seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1074         seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1075         seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1076     }
1077 }
1078 
/* Dump all four (mode x direction) NDC counter sets for one block.
 * @idx is currently unused; callers pass an NDC index for symmetry
 * with ndc_blk_hits_miss_stats().
 */
static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
{
    seq_puts(s, "\n***** CACHE mode read stats *****\n");
    ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
    seq_puts(s, "\n***** CACHE mode write stats *****\n");
    ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
    seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
    ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
    seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
    ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
    return 0;
}
1091 
/* debugfs show: NDC cache stats for the NPA block */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
    return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1098 
1099 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1100 {
1101     struct nix_hw *nix_hw;
1102     struct rvu *rvu;
1103     int bank, max_bank;
1104 
1105     if (blk_addr == BLKADDR_NDC_NPA0) {
1106         rvu = s->private;
1107     } else {
1108         nix_hw = s->private;
1109         rvu = nix_hw->rvu;
1110     }
1111 
1112     max_bank = NDC_MAX_BANK(rvu, blk_addr);
1113     for (bank = 0; bank < max_bank; bank++) {
1114         seq_printf(s, "BANK:%d\n", bank);
1115         seq_printf(s, "\tHits:\t%lld\n",
1116                (u64)rvu_read64(rvu, blk_addr,
1117                NDC_AF_BANKX_HIT_PC(bank)));
1118         seq_printf(s, "\tMiss:\t%lld\n",
1119                (u64)rvu_read64(rvu, blk_addr,
1120                 NDC_AF_BANKX_MISS_PC(bank)));
1121     }
1122     return 0;
1123 }
1124 
1125 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1126 {
1127     struct nix_hw *nix_hw = filp->private;
1128     int blkaddr = 0;
1129     int ndc_idx = 0;
1130 
1131     blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1132            BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1133     ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1134 
1135     return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1136 }
1137 
1138 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1139 
1140 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1141 {
1142     struct nix_hw *nix_hw = filp->private;
1143     int blkaddr = 0;
1144     int ndc_idx = 0;
1145 
1146     blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1147            BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1148     ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1149 
1150     return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1151 }
1152 
1153 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1154 
/* debugfs show: NDC per-bank hit/miss stats for the NPA block */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
                         void *unused)
{
    return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1162 
1163 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1164                         void *unused)
1165 {
1166     struct nix_hw *nix_hw = filp->private;
1167     int ndc_idx = NPA0_U;
1168     int blkaddr = 0;
1169 
1170     blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1171            BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1172 
1173     return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1174 }
1175 
1176 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1177 
1178 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1179                         void *unused)
1180 {
1181     struct nix_hw *nix_hw = filp->private;
1182     int ndc_idx = NPA0_U;
1183     int blkaddr = 0;
1184 
1185     blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1186            BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1187 
1188     return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1189 }
1190 
1191 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1192 
1193 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1194                    struct nix_cn10k_sq_ctx_s *sq_ctx)
1195 {
1196     seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1197            sq_ctx->ena, sq_ctx->qint_idx);
1198     seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1199            sq_ctx->substream, sq_ctx->sdp_mcast);
1200     seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1201            sq_ctx->cq, sq_ctx->sqe_way_mask);
1202 
1203     seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1204            sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1205     seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1206            sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1207     seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1208            sq_ctx->default_chan, sq_ctx->sqb_count);
1209 
1210     seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1211     seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1212     seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1213            sq_ctx->sqb_aura, sq_ctx->sq_int);
1214     seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1215            sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1216 
1217     seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1218            sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1219     seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1220            sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1221     seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1222            sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1223     seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1224            sq_ctx->tail_offset, sq_ctx->smenq_offset);
1225     seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1226            sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1227 
1228     seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1229            sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1230     seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1231     seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1232     seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1233     seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1234            sq_ctx->smenq_next_sqb);
1235 
1236     seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1237 
1238     seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1239     seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1240            sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1241     seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1242            sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1243     seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1244            sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1245 
1246     seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1247            (u64)sq_ctx->scm_lso_rem);
1248     seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1249     seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1250     seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1251            (u64)sq_ctx->dropped_octs);
1252     seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1253            (u64)sq_ctx->dropped_pkts);
1254 }
1255 
/* Dumps given nix_sq's context word by word.
 *
 * On non-OTx2 (CN10K) silicon the AQ response actually holds the CN10K
 * layout, so delegate to print_nix_cn10k_sq_ctx() after a re-cast.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
    struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
    struct nix_hw *nix_hw = m->private;
    struct rvu *rvu = nix_hw->rvu;

    if (!is_rvu_otx2(rvu)) {
        print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
        return;
    }
    /* OTx2 layout: words W0..W15 in register order */
    seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
           sq_ctx->sqe_way_mask, sq_ctx->cq);
    seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
           sq_ctx->sdp_mcast, sq_ctx->substream);
    seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
           sq_ctx->qint_idx, sq_ctx->ena);

    seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
           sq_ctx->sqb_count, sq_ctx->default_chan);
    seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
           sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
    seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
           sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

    seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
           sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
    seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
           sq_ctx->sq_int, sq_ctx->sqb_aura);
    seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

    seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
           sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
    seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
           sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
    seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
           sq_ctx->smenq_offset, sq_ctx->tail_offset);
    seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
           sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
    seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
           sq_ctx->mnq_dis, sq_ctx->lmt_dis);
    seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
           sq_ctx->cq_limit, sq_ctx->max_sqe_size);

    seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
    seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
    seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
    seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
           sq_ctx->smenq_next_sqb);

    seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

    seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
           sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
    seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
           sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
    seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
           sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
    seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

    seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
           (u64)sq_ctx->scm_lso_rem);
    seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
    seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
    seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
           (u64)sq_ctx->dropped_octs);
    seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
           (u64)sq_ctx->dropped_pkts);
}
1325 
1326 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1327                    struct nix_cn10k_rq_ctx_s *rq_ctx)
1328 {
1329     seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1330            rq_ctx->ena, rq_ctx->sso_ena);
1331     seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1332            rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1333     seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1334            rq_ctx->cq, rq_ctx->lenerr_dis);
1335     seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1336            rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1337     seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1338            rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1339     seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1340            rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1341     seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1342 
1343     seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1344            rq_ctx->spb_aura, rq_ctx->lpb_aura);
1345     seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1346     seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1347            rq_ctx->sso_grp, rq_ctx->sso_tt);
1348     seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1349            rq_ctx->pb_caching, rq_ctx->wqe_caching);
1350     seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1351            rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1352     seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1353            rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1354     seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1355            rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1356 
1357     seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1358     seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1359     seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1360     seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1361            rq_ctx->wqe_skip, rq_ctx->spb_ena);
1362     seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1363            rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1364     seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1365            rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1366     seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1367            rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1368 
1369     seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1370            rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1371     seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1372            rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1373     seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1374            rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1375     seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1376            rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1377 
1378     seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1379            rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1380     seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1381            rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1382     seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1383            rq_ctx->rq_int, rq_ctx->rq_int_ena);
1384     seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1385 
1386     seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1387            rq_ctx->ltag, rq_ctx->good_utag);
1388     seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1389            rq_ctx->bad_utag, rq_ctx->flow_tagw);
1390     seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1391            rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1392     seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1393            rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1394     seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1395 
1396     seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1397     seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1398     seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1399     seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1400     seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1401 }
1402 
/* Dumps given nix_rq's context word by word.
 *
 * On non-OTx2 (CN10K) silicon the AQ response holds the CN10K layout,
 * so delegate to print_nix_cn10k_rq_ctx() after a re-cast.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
    struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
    struct nix_hw *nix_hw = m->private;
    struct rvu *rvu = nix_hw->rvu;

    if (!is_rvu_otx2(rvu)) {
        print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
        return;
    }

    /* OTx2 layout: words W0..W10 in register order */
    seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
           rq_ctx->wqe_aura, rq_ctx->substream);
    seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
           rq_ctx->cq, rq_ctx->ena_wqwd);
    seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
           rq_ctx->ipsech_ena, rq_ctx->sso_ena);
    seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

    seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
           rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
    seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
           rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
    seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
           rq_ctx->pb_caching, rq_ctx->sso_tt);
    seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
           rq_ctx->sso_grp, rq_ctx->lpb_aura);
    seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

    seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
           rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
    seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
           rq_ctx->xqe_imm_size, rq_ctx->later_skip);
    seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
           rq_ctx->first_skip, rq_ctx->lpb_sizem1);
    seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
           rq_ctx->spb_ena, rq_ctx->wqe_skip);
    seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

    seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
           rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
    seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
           rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
    seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
           rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
    seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
           rq_ctx->xqe_pass, rq_ctx->xqe_drop);

    seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
           rq_ctx->qint_idx, rq_ctx->rq_int_ena);
    seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
           rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
    seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
           rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
    seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

    seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
           rq_ctx->flow_tagw, rq_ctx->bad_utag);
    seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
           rq_ctx->good_utag, rq_ctx->ltag);

    seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
    seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
    seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
    seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
    seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1471 
/* Dumps given nix_cq's context word by word (same layout on all
 * supported silicon, hence no CN10K variant).
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
    struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

    seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

    seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
    seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
           cq_ctx->avg_con, cq_ctx->cint_idx);
    seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
           cq_ctx->cq_err, cq_ctx->qint_idx);
    seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
           cq_ctx->bpid, cq_ctx->bp_ena);

    seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
           cq_ctx->update_time, cq_ctx->avg_level);
    seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
           cq_ctx->head, cq_ctx->tail);

    seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
           cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
    seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
           cq_ctx->qsize, cq_ctx->caching);
    seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
           cq_ctx->substream, cq_ctx->ena);
    seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
           cq_ctx->drop_ena, cq_ctx->drop);
    seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1502 
/* Read and dump NIX queue (CQ/SQ/RQ) context via the NIX admin queue.
 *
 * The NIX LF, queue id and "all" flag were cached by an earlier write
 * (see write_nix_queue_ctx()); this show handler replays them.
 * Returns 0 on success, -EINVAL on validation or AQ failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
                     void *unused, int ctype)
{
    /* Context-type specific printer, chosen below */
    void (*print_nix_ctx)(struct seq_file *filp,
                  struct nix_aq_enq_rsp *rsp) = NULL;
    struct nix_hw *nix_hw = filp->private;
    struct rvu *rvu = nix_hw->rvu;
    struct nix_aq_enq_req aq_req;
    struct nix_aq_enq_rsp rsp;
    char *ctype_string = NULL;
    int qidx, rc, max_id = 0;
    struct rvu_pfvf *pfvf;
    int nixlf, id, all;
    u16 pcifunc;

    /* Fetch parameters cached by the most recent debugfs write */
    switch (ctype) {
    case NIX_AQ_CTYPE_CQ:
        nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
        id = rvu->rvu_dbg.nix_cq_ctx.id;
        all = rvu->rvu_dbg.nix_cq_ctx.all;
        break;

    case NIX_AQ_CTYPE_SQ:
        nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
        id = rvu->rvu_dbg.nix_sq_ctx.id;
        all = rvu->rvu_dbg.nix_sq_ctx.all;
        break;

    case NIX_AQ_CTYPE_RQ:
        nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
        id = rvu->rvu_dbg.nix_rq_ctx.id;
        all = rvu->rvu_dbg.nix_rq_ctx.all;
        break;

    default:
        return -EINVAL;
    }

    if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
        return -EINVAL;

    pfvf = rvu_get_pfvf(rvu, pcifunc);
    if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
        seq_puts(filp, "SQ context is not initialized\n");
        return -EINVAL;
    } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
        seq_puts(filp, "RQ context is not initialized\n");
        return -EINVAL;
    } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
        seq_puts(filp, "CQ context is not initialized\n");
        return -EINVAL;
    }

    /* Select queue-size bound, label and printer per context type */
    if (ctype == NIX_AQ_CTYPE_SQ) {
        max_id = pfvf->sq_ctx->qsize;
        ctype_string = "sq";
        print_nix_ctx = print_nix_sq_ctx;
    } else if (ctype == NIX_AQ_CTYPE_RQ) {
        max_id = pfvf->rq_ctx->qsize;
        ctype_string = "rq";
        print_nix_ctx = print_nix_rq_ctx;
    } else if (ctype == NIX_AQ_CTYPE_CQ) {
        max_id = pfvf->cq_ctx->qsize;
        ctype_string = "cq";
        print_nix_ctx = print_nix_cq_ctx;
    }

    /* Build a READ instruction for the NIX admin queue */
    memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
    aq_req.hdr.pcifunc = pcifunc;
    aq_req.ctype = ctype;
    aq_req.op = NIX_AQ_INSTOP_READ;
    /* "all" walks every queue of the LF, else just the cached id */
    if (all)
        id = 0;
    else
        max_id = id + 1;
    for (qidx = id; qidx < max_id; qidx++) {
        aq_req.qidx = qidx;
        seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
               ctype_string, nixlf, aq_req.qidx);
        rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
        if (rc) {
            seq_puts(filp, "Failed to read the context\n");
            return -EINVAL;
        }
        print_nix_ctx(filp, &rsp);
    }
    return 0;
}
1591 
1592 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1593                    int id, int ctype, char *ctype_string,
1594                    struct seq_file *m)
1595 {
1596     struct nix_hw *nix_hw = m->private;
1597     struct rvu_pfvf *pfvf;
1598     int max_id = 0;
1599     u16 pcifunc;
1600 
1601     if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1602         return -EINVAL;
1603 
1604     pfvf = rvu_get_pfvf(rvu, pcifunc);
1605 
1606     if (ctype == NIX_AQ_CTYPE_SQ) {
1607         if (!pfvf->sq_ctx) {
1608             dev_warn(rvu->dev, "SQ context is not initialized\n");
1609             return -EINVAL;
1610         }
1611         max_id = pfvf->sq_ctx->qsize;
1612     } else if (ctype == NIX_AQ_CTYPE_RQ) {
1613         if (!pfvf->rq_ctx) {
1614             dev_warn(rvu->dev, "RQ context is not initialized\n");
1615             return -EINVAL;
1616         }
1617         max_id = pfvf->rq_ctx->qsize;
1618     } else if (ctype == NIX_AQ_CTYPE_CQ) {
1619         if (!pfvf->cq_ctx) {
1620             dev_warn(rvu->dev, "CQ context is not initialized\n");
1621             return -EINVAL;
1622         }
1623         max_id = pfvf->cq_ctx->qsize;
1624     }
1625 
1626     if (id < 0 || id >= max_id) {
1627         dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1628              ctype_string, max_id - 1);
1629         return -EINVAL;
1630     }
1631     switch (ctype) {
1632     case NIX_AQ_CTYPE_CQ:
1633         rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1634         rvu->rvu_dbg.nix_cq_ctx.id = id;
1635         rvu->rvu_dbg.nix_cq_ctx.all = all;
1636         break;
1637 
1638     case NIX_AQ_CTYPE_SQ:
1639         rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1640         rvu->rvu_dbg.nix_sq_ctx.id = id;
1641         rvu->rvu_dbg.nix_sq_ctx.all = all;
1642         break;
1643 
1644     case NIX_AQ_CTYPE_RQ:
1645         rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1646         rvu->rvu_dbg.nix_rq_ctx.id = id;
1647         rvu->rvu_dbg.nix_rq_ctx.all = all;
1648         break;
1649     default:
1650         return -EINVAL;
1651     }
1652     return 0;
1653 }
1654 
1655 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1656                        const char __user *buffer,
1657                        size_t count, loff_t *ppos,
1658                        int ctype)
1659 {
1660     struct seq_file *m = filp->private_data;
1661     struct nix_hw *nix_hw = m->private;
1662     struct rvu *rvu = nix_hw->rvu;
1663     char *cmd_buf, *ctype_string;
1664     int nixlf, id = 0, ret;
1665     bool all = false;
1666 
1667     if ((*ppos != 0) || !count)
1668         return -EINVAL;
1669 
1670     switch (ctype) {
1671     case NIX_AQ_CTYPE_SQ:
1672         ctype_string = "sq";
1673         break;
1674     case NIX_AQ_CTYPE_RQ:
1675         ctype_string = "rq";
1676         break;
1677     case NIX_AQ_CTYPE_CQ:
1678         ctype_string = "cq";
1679         break;
1680     default:
1681         return -EINVAL;
1682     }
1683 
1684     cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1685 
1686     if (!cmd_buf)
1687         return count;
1688 
1689     ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1690                    &nixlf, &id, &all);
1691     if (ret < 0) {
1692         dev_info(rvu->dev,
1693              "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1694              ctype_string, ctype_string);
1695         goto done;
1696     } else {
1697         ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1698                       ctype_string, m);
1699     }
1700 done:
1701     kfree(cmd_buf);
1702     return ret ? ret : count;
1703 }
1704 
/* debugfs write handler for "sq_ctx": select which SQ context(s) to dump. */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
                    const char __user *buffer,
                    size_t count, loff_t *ppos)
{
    return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
                        NIX_AQ_CTYPE_SQ);
}

/* debugfs read handler for "sq_ctx": dump the previously selected SQ context(s). */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
    return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1719 
/* debugfs write handler for "rq_ctx": select which RQ context(s) to dump. */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
                    const char __user *buffer,
                    size_t count, loff_t *ppos)
{
    return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
                        NIX_AQ_CTYPE_RQ);
}

/* debugfs read handler for "rq_ctx": dump the previously selected RQ context(s). */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
    return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1734 
/* debugfs write handler for "cq_ctx": select which CQ context(s) to dump. */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
                    const char __user *buffer,
                    size_t count, loff_t *ppos)
{
    return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
                        NIX_AQ_CTYPE_CQ);
}

/* debugfs read handler for "cq_ctx": dump the previously selected CQ context(s). */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
    return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1749 
1750 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1751                  unsigned long *bmap, char *qtype)
1752 {
1753     char *buf;
1754 
1755     buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1756     if (!buf)
1757         return;
1758 
1759     bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1760     seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1761     seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1762            qtype, buf);
1763     kfree(buf);
1764 }
1765 
1766 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1767 {
1768     if (!pfvf->cq_ctx)
1769         seq_puts(filp, "cq context is not initialized\n");
1770     else
1771         print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1772                      "cq");
1773 
1774     if (!pfvf->rq_ctx)
1775         seq_puts(filp, "rq context is not initialized\n");
1776     else
1777         print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1778                      "rq");
1779 
1780     if (!pfvf->sq_ctx)
1781         seq_puts(filp, "sq context is not initialized\n");
1782     else
1783         print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1784                      "sq");
1785 }
1786 
/* debugfs write handler for NIX "qsize": select the LF to report on. */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
                       const char __user *buffer,
                       size_t count, loff_t *ppos)
{
    return rvu_dbg_qsize_write(filp, buffer, count, ppos,
                   BLKTYPE_NIX);
}

/* debugfs read handler for NIX "qsize": show queue sizes of the selected LF. */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
    return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1801 
1802 static void print_band_prof_ctx(struct seq_file *m,
1803                 struct nix_bandprof_s *prof)
1804 {
1805     char *str;
1806 
1807     switch (prof->pc_mode) {
1808     case NIX_RX_PC_MODE_VLAN:
1809         str = "VLAN";
1810         break;
1811     case NIX_RX_PC_MODE_DSCP:
1812         str = "DSCP";
1813         break;
1814     case NIX_RX_PC_MODE_GEN:
1815         str = "Generic";
1816         break;
1817     case NIX_RX_PC_MODE_RSVD:
1818         str = "Reserved";
1819         break;
1820     }
1821     seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1822     str = (prof->icolor == 3) ? "Color blind" :
1823         (prof->icolor == 0) ? "Green" :
1824         (prof->icolor == 1) ? "Yellow" : "Red";
1825     seq_printf(m, "W0: icolor\t\t%s\n", str);
1826     seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1827     seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1828     seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1829     seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1830     seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1831     seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1832     seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1833     seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1834 
1835     seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1836     str = (prof->lmode == 0) ? "byte" : "packet";
1837     seq_printf(m, "W1: lmode\t\t%s\n", str);
1838     seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1839     seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1840     seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1841     seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1842     str = (prof->gc_action == 0) ? "PASS" :
1843         (prof->gc_action == 1) ? "DROP" : "RED";
1844     seq_printf(m, "W1: gc_action\t\t%s\n", str);
1845     str = (prof->yc_action == 0) ? "PASS" :
1846         (prof->yc_action == 1) ? "DROP" : "RED";
1847     seq_printf(m, "W1: yc_action\t\t%s\n", str);
1848     str = (prof->rc_action == 0) ? "PASS" :
1849         (prof->rc_action == 1) ? "DROP" : "RED";
1850     seq_printf(m, "W1: rc_action\t\t%s\n", str);
1851     seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1852     seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1853     seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1854 
1855     seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1856     seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1857     seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1858     seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1859            (u64)prof->green_pkt_pass);
1860     seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1861            (u64)prof->yellow_pkt_pass);
1862     seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1863     seq_printf(m, "W7: green_octs_pass\t%lld\n",
1864            (u64)prof->green_octs_pass);
1865     seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1866            (u64)prof->yellow_octs_pass);
1867     seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1868     seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1869            (u64)prof->green_pkt_drop);
1870     seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1871            (u64)prof->yellow_pkt_drop);
1872     seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1873     seq_printf(m, "W13: green_octs_drop\t%lld\n",
1874            (u64)prof->green_octs_drop);
1875     seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1876            (u64)prof->yellow_octs_drop);
1877     seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1878     seq_puts(m, "==============================\n");
1879 }
1880 
/* debugfs "ingress_policer_ctx": dump the hardware context of every
 * currently-allocated bandwidth profile, grouped by policer layer.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
    struct nix_hw *nix_hw = m->private;
    struct nix_cn10k_aq_enq_req aq_req;
    struct nix_cn10k_aq_enq_rsp aq_rsp;
    struct rvu *rvu = nix_hw->rvu;
    struct nix_ipolicer *ipolicer;
    int layer, prof_idx, idx, rc;
    u16 pcifunc;
    char *str;

    /* Ingress policers do not exist on all platforms */
    if (!nix_hw->ipolicer)
        return 0;

    for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
        if (layer == BAND_PROF_INVAL_LAYER)
            continue;
        str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
            (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

        seq_printf(m, "\n%s bandwidth profiles\n", str);
        seq_puts(m, "=======================\n");

        ipolicer = &nix_hw->ipolicer[layer];

        for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
            /* Skip profiles that are not allocated to anyone */
            if (is_rsrc_free(&ipolicer->band_prof, idx))
                continue;

            /* HW profile index: low 14 bits select the profile,
             * the next bits encode the layer.
             */
            prof_idx = (idx & 0x3FFF) | (layer << 14);
            rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
                         0x00, NIX_AQ_CTYPE_BANDPROF,
                         prof_idx);
            if (rc) {
                /* Stop the dump but report success to debugfs;
                 * the error has already been logged.
                 */
                dev_err(rvu->dev,
                    "%s: Failed to fetch context of %s profile %d, err %d\n",
                    __func__, str, idx, rc);
                return 0;
            }
            seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
            pcifunc = ipolicer->pfvf_map[idx];
            /* FUNC bits clear => owned by a PF, else by one of its VFs */
            if (!(pcifunc & RVU_PFVF_FUNC_MASK))
                seq_printf(m, "Allocated to :: PF %d\n",
                       rvu_get_pf(pcifunc));
            else
                seq_printf(m, "Allocated to :: PF %d VF %d\n",
                       rvu_get_pf(pcifunc),
                       (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
            print_band_prof_ctx(m, &aq_rsp.prof);
        }
    }
    return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1937 
1938 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1939 {
1940     struct nix_hw *nix_hw = m->private;
1941     struct nix_ipolicer *ipolicer;
1942     int layer;
1943     char *str;
1944 
1945     /* Ingress policers do not exist on all platforms */
1946     if (!nix_hw->ipolicer)
1947         return 0;
1948 
1949     seq_puts(m, "\nBandwidth profile resource free count\n");
1950     seq_puts(m, "=====================================\n");
1951     for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1952         if (layer == BAND_PROF_INVAL_LAYER)
1953             continue;
1954         str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1955             (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1956 
1957         ipolicer = &nix_hw->ipolicer[layer];
1958         seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1959                ipolicer->band_prof.max,
1960                rvu_rsrc_free_count(&ipolicer->band_prof));
1961     }
1962     seq_puts(m, "=====================================\n");
1963 
1964     return 0;
1965 }
1966 
1967 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1968 
/* Create the per-NIX-block debugfs directory ("nix" or "nix1") and all of
 * its context/cache/qsize/policer files. No-op if the block is absent.
 */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
    struct nix_hw *nix_hw;

    if (!is_block_implemented(rvu->hw, blkaddr))
        return;

    if (blkaddr == BLKADDR_NIX0) {
        rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
        nix_hw = &rvu->hw->nix[0];
    } else {
        /* Second NIX block gets its own "nix1" directory */
        rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
                              rvu->rvu_dbg.root);
        nix_hw = &rvu->hw->nix[1];
    }

    /* Most files take the nix_hw block as private data; "qsize" takes
     * the rvu device itself.
     */
    debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_sq_ctx_fops);
    debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_rq_ctx_fops);
    debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_cq_ctx_fops);
    debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_ndc_tx_cache_fops);
    debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_ndc_rx_cache_fops);
    debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_ndc_tx_hits_miss_fops);
    debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_ndc_rx_hits_miss_fops);
    debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
                &rvu_dbg_nix_qsize_fops);
    debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_band_prof_ctx_fops);
    debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
                &rvu_dbg_nix_band_prof_rsrc_fops);
}
2006 
/* Create the "npa" debugfs directory and its qsize/context/cache files. */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
    rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

    debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
                &rvu_dbg_npa_qsize_fops);
    debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
                &rvu_dbg_npa_aura_ctx_fops);
    debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
                &rvu_dbg_npa_pool_ctx_fops);
    debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
                &rvu_dbg_npa_ndc_cache_fops);
    debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
                &rvu_dbg_npa_ndc_hits_miss_fops);
}
2022 
/* Read the cumulative NIX RX stat @idx for the current CGX/LMAC via
 * rvu_cgx_nix_cuml_stats() and, on success, print it as "@name: <value>".
 * Evaluates to the counter value. Relies on 'rvu', 'cgxd', 'lmac_id',
 * 'err' and the seq_file 's' being in scope at the expansion site.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)              \
    ({                              \
        u64 cnt;                        \
        err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
                         NIX_STATS_RX, &(cnt)); \
        if (!err)                       \
            seq_printf(s, "%s: %llu\n", name, cnt);     \
        cnt;                            \
    })

/* Same as PRINT_CGX_CUML_NIXRX_STATUS, but for the NIX TX counters. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)          \
    ({                              \
        u64 cnt;                        \
        err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
                      NIX_STATS_TX, &(cnt));    \
        if (!err)                       \
            seq_printf(s, "%s: %llu\n", name, cnt);     \
        cnt;                            \
    })
2042 
2043 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2044 {
2045     struct cgx_link_user_info linfo;
2046     struct mac_ops *mac_ops;
2047     void *cgxd = s->private;
2048     u64 ucast, mcast, bcast;
2049     int stat = 0, err = 0;
2050     u64 tx_stat, rx_stat;
2051     struct rvu *rvu;
2052 
2053     rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2054                          PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2055     if (!rvu)
2056         return -ENODEV;
2057 
2058     mac_ops = get_mac_ops(cgxd);
2059     /* There can be no CGX devices at all */
2060     if (!mac_ops)
2061         return 0;
2062 
2063     /* Link status */
2064     seq_puts(s, "\n=======Link Status======\n\n");
2065     err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2066     if (err)
2067         seq_puts(s, "Failed to read link status\n");
2068     seq_printf(s, "\nLink is %s %d Mbps\n\n",
2069            linfo.link_up ? "UP" : "DOWN", linfo.speed);
2070 
2071     /* Rx stats */
2072     seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2073            mac_ops->name);
2074     ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2075     if (err)
2076         return err;
2077     mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2078     if (err)
2079         return err;
2080     bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2081     if (err)
2082         return err;
2083     seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2084     PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2085     if (err)
2086         return err;
2087     PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2088     if (err)
2089         return err;
2090     PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2091     if (err)
2092         return err;
2093 
2094     /* Tx stats */
2095     seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2096            mac_ops->name);
2097     ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2098     if (err)
2099         return err;
2100     mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2101     if (err)
2102         return err;
2103     bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2104     if (err)
2105         return err;
2106     seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2107     PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2108     if (err)
2109         return err;
2110     PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2111     if (err)
2112         return err;
2113 
2114     /* Rx stats */
2115     seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2116     while (stat < mac_ops->rx_stats_cnt) {
2117         err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2118         if (err)
2119             return err;
2120         if (is_rvu_otx2(rvu))
2121             seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2122                    rx_stat);
2123         else
2124             seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2125                    rx_stat);
2126         stat++;
2127     }
2128 
2129     /* Tx stats */
2130     stat = 0;
2131     seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2132     while (stat < mac_ops->tx_stats_cnt) {
2133         err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2134         if (err)
2135             return err;
2136 
2137         if (is_rvu_otx2(rvu))
2138             seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2139                    tx_stat);
2140         else
2141             seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2142                    tx_stat);
2143         stat++;
2144     }
2145 
2146     return err;
2147 }
2148 
2149 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2150 {
2151     struct dentry *current_dir;
2152     char *buf;
2153 
2154     current_dir = filp->file->f_path.dentry->d_parent;
2155     buf = strrchr(current_dir->d_name.name, 'c');
2156     if (!buf)
2157         return -EINVAL;
2158 
2159     return kstrtoint(buf + 1, 10, lmac_id);
2160 }
2161 
/* debugfs "stats" read handler: resolve the LMAC id from the directory
 * name and dump its statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
    int lmac_id;
    int err = rvu_dbg_derive_lmacid(filp, &lmac_id);

    if (err)
        return err;

    return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2174 
2175 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2176 {
2177     struct pci_dev *pdev = NULL;
2178     void *cgxd = s->private;
2179     char *bcast, *mcast;
2180     u16 index, domain;
2181     u8 dmac[ETH_ALEN];
2182     struct rvu *rvu;
2183     u64 cfg, mac;
2184     int pf;
2185 
2186     rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2187                          PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2188     if (!rvu)
2189         return -ENODEV;
2190 
2191     pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2192     domain = 2;
2193 
2194     pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2195     if (!pdev)
2196         return 0;
2197 
2198     cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2199     bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2200     mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2201 
2202     seq_puts(s,
2203          "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2204     seq_printf(s, "%s  PF%d  %9s  %9s",
2205            dev_name(&pdev->dev), pf, bcast, mcast);
2206     if (cfg & CGX_DMAC_CAM_ACCEPT)
2207         seq_printf(s, "%12s\n\n", "UNICAST");
2208     else
2209         seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2210 
2211     seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2212 
2213     for (index = 0 ; index < 32 ; index++) {
2214         cfg = cgx_read_dmac_entry(cgxd, index);
2215         /* Display enabled dmac entries associated with current lmac */
2216         if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2217             FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2218             mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2219             u64_to_ether_addr(mac, dmac);
2220             seq_printf(s, "%7d     %pM\n", index, dmac);
2221         }
2222     }
2223 
2224     return 0;
2225 }
2226 
/* debugfs "mac_filter" read handler: resolve the LMAC id from the
 * directory name and dump its DMAC filter table.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
    int err, lmac_id;

    err = rvu_dbg_derive_lmacid(filp, &lmac_id);
    if (!err)
        return cgx_print_dmac_flt(filp, lmac_id);

    return err;
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2239 
2240 static void rvu_dbg_cgx_init(struct rvu *rvu)
2241 {
2242     struct mac_ops *mac_ops;
2243     unsigned long lmac_bmap;
2244     int i, lmac_id;
2245     char dname[20];
2246     void *cgx;
2247 
2248     if (!cgx_get_cgxcnt_max())
2249         return;
2250 
2251     mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2252     if (!mac_ops)
2253         return;
2254 
2255     rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2256                            rvu->rvu_dbg.root);
2257 
2258     for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2259         cgx = rvu_cgx_pdata(i, rvu);
2260         if (!cgx)
2261             continue;
2262         lmac_bmap = cgx_get_lmac_bmap(cgx);
2263         /* cgx debugfs dir */
2264         sprintf(dname, "%s%d", mac_ops->name, i);
2265         rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2266                               rvu->rvu_dbg.cgx_root);
2267 
2268         for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2269             /* lmac debugfs dir */
2270             sprintf(dname, "lmac%d", lmac_id);
2271             rvu->rvu_dbg.lmac =
2272                 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2273 
2274             debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2275                         cgx, &rvu_dbg_cgx_stat_fops);
2276             debugfs_create_file("mac_filter", 0600,
2277                         rvu->rvu_dbg.lmac, cgx,
2278                         &rvu_dbg_cgx_dmac_flt_fops);
2279         }
2280     }
2281 }
2282 
2283 /* NPC debugfs APIs */
2284 static void rvu_print_npc_mcam_info(struct seq_file *s,
2285                     u16 pcifunc, int blkaddr)
2286 {
2287     struct rvu *rvu = s->private;
2288     int entry_acnt, entry_ecnt;
2289     int cntr_acnt, cntr_ecnt;
2290 
2291     rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2292                       &entry_acnt, &entry_ecnt);
2293     rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2294                         &cntr_acnt, &cntr_ecnt);
2295     if (!entry_acnt && !cntr_acnt)
2296         return;
2297 
2298     if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2299         seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2300                rvu_get_pf(pcifunc));
2301     else
2302         seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2303                rvu_get_pf(pcifunc),
2304                (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2305 
2306     if (entry_acnt) {
2307         seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2308         seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2309     }
2310     if (cntr_acnt) {
2311         seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2312         seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2313     }
2314 }
2315 
/* debugfs "mcam_info": summarize NPC MCAM geometry (key widths, entry and
 * counter totals) and the current per-PF/VF allocations.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
    struct rvu *rvu = filp->private;
    int pf, vf, numvfs, blkaddr;
    struct npc_mcam *mcam;
    u16 pcifunc, counters;
    u64 cfg;

    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
    if (blkaddr < 0)
        return -ENODEV;

    mcam = &rvu->hw->mcam;
    counters = rvu->hw->npc_counters;

    seq_puts(filp, "\nNPC MCAM info:\n");
    /* MCAM keywidth on receive and transmit sides */
    cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
    cfg = (cfg >> 32) & 0x07;
    seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
           "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
           "224bits" : "448bits"));
    cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
    cfg = (cfg >> 32) & 0x07;
    seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
           "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
           "224bits" : "448bits"));

    /* mcam->lock protects the bitmap/counter fields read below */
    mutex_lock(&mcam->lock);
    /* MCAM entries */
    seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
    seq_printf(filp, "\t\t Reserved \t: %d\n",
           mcam->total_entries - mcam->bmap_entries);
    seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

    /* MCAM counters */
    seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
    seq_printf(filp, "\t\t Reserved \t: %d\n",
           counters - mcam->counters.max);
    seq_printf(filp, "\t\t Available \t: %d\n",
           rvu_rsrc_free_count(&mcam->counters));

    /* No entries allocated to anyone - skip the per-function dump */
    if (mcam->bmap_entries == mcam->bmap_fcnt) {
        mutex_unlock(&mcam->lock);
        return 0;
    }

    seq_puts(filp, "\n\t\t Current allocation\n");
    seq_puts(filp, "\t\t====================\n");
    for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
        pcifunc = (pf << RVU_PFVF_PF_SHIFT);
        rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

        /* VF count for this PF lives in its RVU_PRIV_PFX_CFG */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        numvfs = (cfg >> 12) & 0xFF;
        for (vf = 0; vf < numvfs; vf++) {
            pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
            rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
        }
    }

    mutex_unlock(&mcam->lock);
    return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2382 
/* debugfs read handler: show the match-stat counter attached to the NPC
 * MCAM RX miss action.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
                         void *unused)
{
    struct rvu *rvu = filp->private;
    struct npc_mcam *mcam;
    int blkaddr;

    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
    if (blkaddr < 0)
        return -ENODEV;

    mcam = &rvu->hw->mcam;

    seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
    /* rx_miss_act_cntr indexes the NPC_AF_MATCH_STATX register bank */
    seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
           rvu_read64(rvu, blkaddr,
                  NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

    return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2405 
/* Print every match field of one MCAM rule: for each feature bit set in
 * rule->features, emit the field name followed by its packet value and mask.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
                    struct rvu_npc_mcam_rule *rule)
{
    u8 bit;

    /* rule->features is a 64-bit feature bitmap; iterate set bits only */
    for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
        seq_printf(s, "\t%s  ", npc_get_field_name(bit));
        switch (bit) {
        case NPC_DMAC:
            seq_printf(s, "%pM ", rule->packet.dmac);
            seq_printf(s, "mask %pM\n", rule->mask.dmac);
            break;
        case NPC_SMAC:
            seq_printf(s, "%pM ", rule->packet.smac);
            seq_printf(s, "mask %pM\n", rule->mask.smac);
            break;
        case NPC_ETYPE:
            /* Multi-byte packet fields are stored in network order */
            seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
            seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
            break;
        case NPC_OUTER_VID:
            seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
            seq_printf(s, "mask 0x%x\n",
                   ntohs(rule->mask.vlan_tci));
            break;
        case NPC_TOS:
            seq_printf(s, "%d ", rule->packet.tos);
            seq_printf(s, "mask 0x%x\n", rule->mask.tos);
            break;
        case NPC_SIP_IPV4:
            seq_printf(s, "%pI4 ", &rule->packet.ip4src);
            seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
            break;
        case NPC_DIP_IPV4:
            seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
            seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
            break;
        case NPC_SIP_IPV6:
            seq_printf(s, "%pI6 ", rule->packet.ip6src);
            seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
            break;
        case NPC_DIP_IPV6:
            seq_printf(s, "%pI6 ", rule->packet.ip6dst);
            seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
            break;
        /* L4 ports share one packet field regardless of protocol */
        case NPC_SPORT_TCP:
        case NPC_SPORT_UDP:
        case NPC_SPORT_SCTP:
            seq_printf(s, "%d ", ntohs(rule->packet.sport));
            seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
            break;
        case NPC_DPORT_TCP:
        case NPC_DPORT_UDP:
        case NPC_DPORT_SCTP:
            seq_printf(s, "%d ", ntohs(rule->packet.dport));
            seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
            break;
        default:
            /* Field name only; no printable value for this feature */
            seq_puts(s, "\n");
            break;
        }
    }
}
2469 
2470 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2471                      struct rvu_npc_mcam_rule *rule)
2472 {
2473     if (is_npc_intf_tx(rule->intf)) {
2474         switch (rule->tx_action.op) {
2475         case NIX_TX_ACTIONOP_DROP:
2476             seq_puts(s, "\taction: Drop\n");
2477             break;
2478         case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2479             seq_puts(s, "\taction: Unicast to default channel\n");
2480             break;
2481         case NIX_TX_ACTIONOP_UCAST_CHAN:
2482             seq_printf(s, "\taction: Unicast to channel %d\n",
2483                    rule->tx_action.index);
2484             break;
2485         case NIX_TX_ACTIONOP_MCAST:
2486             seq_puts(s, "\taction: Multicast\n");
2487             break;
2488         case NIX_TX_ACTIONOP_DROP_VIOL:
2489             seq_puts(s, "\taction: Lockdown Violation Drop\n");
2490             break;
2491         default:
2492             break;
2493         }
2494     } else {
2495         switch (rule->rx_action.op) {
2496         case NIX_RX_ACTIONOP_DROP:
2497             seq_puts(s, "\taction: Drop\n");
2498             break;
2499         case NIX_RX_ACTIONOP_UCAST:
2500             seq_printf(s, "\taction: Direct to queue %d\n",
2501                    rule->rx_action.index);
2502             break;
2503         case NIX_RX_ACTIONOP_RSS:
2504             seq_puts(s, "\taction: RSS\n");
2505             break;
2506         case NIX_RX_ACTIONOP_UCAST_IPSEC:
2507             seq_puts(s, "\taction: Unicast ipsec\n");
2508             break;
2509         case NIX_RX_ACTIONOP_MCAST:
2510             seq_puts(s, "\taction: Multicast\n");
2511             break;
2512         default:
2513             break;
2514         }
2515     }
2516 }
2517 
/* Map an NPC interface id to a printable name ("unknown" if unrecognized). */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2535 
/* debugfs show: dump every software-tracked MCAM rule — owner PF/VF,
 * direction, interface, match fields, forward target (RX), action,
 * hardware enable state, and the match counter value when one is
 * attached to the rule.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* mcam->lock guards the rule list against concurrent install/delete */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* a non-zero FUNC field means the owner is VF (FUNC - 1) */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		if (is_npc_intf_rx(iter->intf)) {
			/* RX rules carry the pcifunc packets are steered to */
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		/* enable state is read back from hardware, not cached */
		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2603 
/* debugfs show: dump the exact-match MEM (hashed, multi-way) table and
 * the CAM (overflow) table, printing channel + MAC for each entry.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	/* per-way cursor into each way's entry list */
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	/* Seed each way's cursor with the head of its entry list.
	 * NOTE(review): the cursor only advances forward, so this assumes
	 * each way's list is ordered by ->index — TODO confirm at insert site.
	 */
	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		/* build a per-row bitmap of ways that hold an entry at depth i */
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* consume this entry; advance the way's cursor */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
2690 
2691 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
2692 {
2693     struct npc_exact_table *table;
2694     struct rvu *rvu = s->private;
2695     int i;
2696 
2697     table = rvu->hw->table;
2698 
2699     seq_puts(s, "\n\tExact Table Info\n");
2700     seq_printf(s, "Exact Match Feature : %s\n",
2701            rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
2702     if (!rvu->hw->cap.npc_exact_match_enabled)
2703         return 0;
2704 
2705     seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
2706     for (i = 0; i < table->num_drop_rules; i++)
2707         seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
2708 
2709     seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
2710     for (i = 0; i < table->num_drop_rules; i++)
2711         seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
2712 
2713     seq_puts(s, "\n\tMEM Table Info\n");
2714     seq_printf(s, "Ways : %d\n", table->mem_table.ways);
2715     seq_printf(s, "Depth : %d\n", table->mem_table.depth);
2716     seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
2717     seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
2718     seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
2719 
2720     seq_puts(s, "\n\tCAM Table Info\n");
2721     seq_printf(s, "Depth : %d\n", table->cam_table.depth);
2722 
2723     return 0;
2724 }
2725 
2726 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
2727 
2728 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
2729 {
2730     struct npc_exact_table *table;
2731     struct rvu *rvu = s->private;
2732     struct npc_key_field *field;
2733     u16 chan, pcifunc;
2734     int blkaddr, i;
2735     u64 cfg, cam1;
2736     char *str;
2737 
2738     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2739     table = rvu->hw->table;
2740 
2741     field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
2742 
2743     seq_puts(s, "\n\t Exact Hit on drop status\n");
2744     seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
2745 
2746     for (i = 0; i < table->num_drop_rules; i++) {
2747         pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
2748         cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
2749 
2750         /* channel will be always in keyword 0 */
2751         cam1 = rvu_read64(rvu, blkaddr,
2752                   NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
2753         chan = field->kw_mask[0] & cam1;
2754 
2755         str = (cfg & 1) ? "enabled" : "disabled";
2756 
2757         seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
2758                rvu_read64(rvu, blkaddr,
2759                       NPC_AF_MATCH_STATX(table->counter_idx[i])),
2760                chan, str);
2761     }
2762 
2763     return 0;
2764 }
2765 
2766 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
2767 
2768 static void rvu_dbg_npc_init(struct rvu *rvu)
2769 {
2770     rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2771 
2772     debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2773                 &rvu_dbg_npc_mcam_info_fops);
2774     debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2775                 &rvu_dbg_npc_mcam_rules_fops);
2776 
2777     debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2778                 &rvu_dbg_npc_rx_miss_act_fops);
2779 
2780     if (!rvu->hw->cap.npc_exact_match_enabled)
2781         return;
2782 
2783     debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
2784                 &rvu_dbg_npc_exact_entries_fops);
2785 
2786     debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
2787                 &rvu_dbg_npc_exact_info_fops);
2788 
2789     debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
2790                 &rvu_dbg_npc_exact_drop_cnt_fops);
2791 
2792 }
2793 
2794 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2795 {
2796     struct cpt_ctx *ctx = filp->private;
2797     u64 busy_sts = 0, free_sts = 0;
2798     u32 e_min = 0, e_max = 0, e, i;
2799     u16 max_ses, max_ies, max_aes;
2800     struct rvu *rvu = ctx->rvu;
2801     int blkaddr = ctx->blkaddr;
2802     u64 reg;
2803 
2804     reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2805     max_ses = reg & 0xffff;
2806     max_ies = (reg >> 16) & 0xffff;
2807     max_aes = (reg >> 32) & 0xffff;
2808 
2809     switch (eng_type) {
2810     case CPT_AE_TYPE:
2811         e_min = max_ses + max_ies;
2812         e_max = max_ses + max_ies + max_aes;
2813         break;
2814     case CPT_SE_TYPE:
2815         e_min = 0;
2816         e_max = max_ses;
2817         break;
2818     case CPT_IE_TYPE:
2819         e_min = max_ses;
2820         e_max = max_ses + max_ies;
2821         break;
2822     default:
2823         return -EINVAL;
2824     }
2825 
2826     for (e = e_min, i = 0; e < e_max; e++, i++) {
2827         reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2828         if (reg & 0x1)
2829             busy_sts |= 1ULL << i;
2830 
2831         if (reg & 0x2)
2832             free_sts |= 1ULL << i;
2833     }
2834     seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2835     seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2836 
2837     return 0;
2838 }
2839 
/* debugfs show: FREE/BUSY status of CPT AE-type engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2846 
/* debugfs show: FREE/BUSY status of CPT SE-type engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2853 
/* debugfs show: FREE/BUSY status of CPT IE-type engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2860 
2861 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2862 {
2863     struct cpt_ctx *ctx = filp->private;
2864     u16 max_ses, max_ies, max_aes;
2865     struct rvu *rvu = ctx->rvu;
2866     int blkaddr = ctx->blkaddr;
2867     u32 e_max, e;
2868     u64 reg;
2869 
2870     reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2871     max_ses = reg & 0xffff;
2872     max_ies = (reg >> 16) & 0xffff;
2873     max_aes = (reg >> 32) & 0xffff;
2874 
2875     e_max = max_ses + max_ies + max_aes;
2876 
2877     seq_puts(filp, "===========================================\n");
2878     for (e = 0; e < e_max; e++) {
2879         reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2880         seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2881                reg & 0xff);
2882         reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2883         seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2884                reg);
2885         reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2886         seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2887                reg);
2888         seq_puts(filp, "===========================================\n");
2889     }
2890     return 0;
2891 }
2892 
2893 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2894 
2895 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2896 {
2897     struct cpt_ctx *ctx = filp->private;
2898     int blkaddr = ctx->blkaddr;
2899     struct rvu *rvu = ctx->rvu;
2900     struct rvu_block *block;
2901     struct rvu_hwinfo *hw;
2902     u64 reg;
2903     u32 lf;
2904 
2905     hw = rvu->hw;
2906     block = &hw->block[blkaddr];
2907     if (!block->lf.bmap)
2908         return -ENODEV;
2909 
2910     seq_puts(filp, "===========================================\n");
2911     for (lf = 0; lf < block->lf.max; lf++) {
2912         reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2913         seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2914         reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2915         seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2916         reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2917         seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2918         reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2919                 (lf << block->lfshift));
2920         seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2921         seq_puts(filp, "===========================================\n");
2922     }
2923     return 0;
2924 }
2925 
2926 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2927 
/* debugfs show: snapshot of the CPT AF fault/interrupt and error-info
 * registers.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	/* FLT_INT and PSNX_EXE each have two instances (index 0 and 1) */
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2954 
2955 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2956 {
2957     struct cpt_ctx *ctx = filp->private;
2958     struct rvu *rvu = ctx->rvu;
2959     int blkaddr = ctx->blkaddr;
2960     u64 reg;
2961 
2962     reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2963     seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2964     reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2965     seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2966     reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2967     seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2968     reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2969     seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2970     reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2971     seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2972     reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2973     seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2974     reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2975     seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2976 
2977     return 0;
2978 }
2979 
2980 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2981 
2982 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2983 {
2984     struct cpt_ctx *ctx;
2985 
2986     if (!is_block_implemented(rvu->hw, blkaddr))
2987         return;
2988 
2989     if (blkaddr == BLKADDR_CPT0) {
2990         rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2991         ctx = &rvu->rvu_dbg.cpt_ctx[0];
2992         ctx->blkaddr = BLKADDR_CPT0;
2993         ctx->rvu = rvu;
2994     } else {
2995         rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2996                               rvu->rvu_dbg.root);
2997         ctx = &rvu->rvu_dbg.cpt_ctx[1];
2998         ctx->blkaddr = BLKADDR_CPT1;
2999         ctx->rvu = rvu;
3000     }
3001 
3002     debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3003                 &rvu_dbg_cpt_pc_fops);
3004     debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3005                 &rvu_dbg_cpt_ae_sts_fops);
3006     debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3007                 &rvu_dbg_cpt_se_sts_fops);
3008     debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3009                 &rvu_dbg_cpt_ie_sts_fops);
3010     debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3011                 &rvu_dbg_cpt_engines_info_fops);
3012     debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3013                 &rvu_dbg_cpt_lfs_info_fops);
3014     debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3015                 &rvu_dbg_cpt_err_info_fops);
3016 }
3017 
/* Root debugfs directory name: "octeontx2" on OcteonTX2 silicon,
 * "cn10k" otherwise.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
3025 
3026 void rvu_dbg_init(struct rvu *rvu)
3027 {
3028     rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3029 
3030     debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3031                 &rvu_dbg_rsrc_status_fops);
3032 
3033     if (!is_rvu_otx2(rvu))
3034         debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3035                     rvu, &rvu_dbg_lmtst_map_table_fops);
3036 
3037     if (!cgx_get_cgxcnt_max())
3038         goto create;
3039 
3040     if (is_rvu_otx2(rvu))
3041         debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3042                     rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3043     else
3044         debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3045                     rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3046 
3047 create:
3048     rvu_dbg_npa_init(rvu);
3049     rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3050 
3051     rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3052     rvu_dbg_cgx_init(rvu);
3053     rvu_dbg_npc_init(rvu);
3054     rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3055     rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3056 }
3057 
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3062 
3063 #endif /* CONFIG_DEBUG_FS */