// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_mbx.h"

/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
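
/*
 * Implied MSI-X entry mapping (see nitrox_register_interrupts() below):
 * packet ring 'r' uses entry PKT_RING_MSIX_BASE + r * NR_RING_VECTORS
 * (ring 0 -> entry 0, ring 1 -> entry 3, ...), while the single non-ring
 * error/mailbox vector lives at entry NON_RING_MSIX_BASE.
 */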

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	union nps_pkt_slc_cnts slc_cnts;
	struct nitrox_cmdq *cmdq = qvec->cmdq;

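	/* read this ring's solicit-port completion counts CSR */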
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* New packet on SLC output port */
	if (slc_cnts.s.slc_int)
		tasklet_hi_schedule(&qvec->resp_tasklet);

	return IRQ_HANDLED;
}

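/*
 * The clear_*_err_intr() helpers below share one pattern: read the
 * block's write-1-to-clear interrupt register, write the value back to
 * acknowledge the pending causes, and log it (rate limited).
 */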
static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	/* Write 1 to clear */
	value = nitrox_read_csr(ndev, NPS_CORE_INT);
	nitrox_write_csr(ndev, NPS_CORE_INT, value);

	dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}

static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the solicit ports */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the input ring */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
	}
}

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, POM_INT);
	nitrox_write_csr(ndev, POM_INT, value);
	dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, PEM0_INT);
	nitrox_write_csr(ndev, PEM0_INT, value);
	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

	if (lbc_int.s.dma_rd_err) {
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

static void clear_efl_err_intr(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int core_int;
		u64 value, offset;

		offset = EFL_CORE_INTX(i);
		core_int.value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, core_int.value);
		dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
				    i, core_int.value);
		if (core_int.s.se_err) {
			offset = EFL_CORE_SE_ERR_INTX(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, BMI_INT);
	nitrox_write_csr(ndev, BMI_INT, value);
	dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

static void nps_core_int_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	/* if pf mode do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
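		/* placeholder: PF-mode queue recovery is not implemented here */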
	} else {
		/*
		 * if VF(s) are enabled, communicate the error information
		 * to the VF(s)
		 */
	}
}

/*
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *   mailbox communication
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	struct nitrox_device *ndev = qvec->ndev;
	union nps_core_int_active core_int;

	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

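	/* service each block whose cause bit is set in NPS_CORE_INT_ACTIVE */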
	if (core_int.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int.s.bmi)
		clear_bmi_err_intr(ndev);

	/* Mailbox interrupt */
	if (core_int.s.mbox)
		nitrox_pf2vf_mbox_handler(ndev);

	/* set resend so the interrupt is raised again if more work is pending */
	core_int.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

	return IRQ_HANDLED;
}

void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

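		/* make sure the response tasklet has finished and cannot run again */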
		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_free_irq_vectors(pdev);
}

int nitrox_register_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int nr_vecs, vec, cpu;
	int ret, i;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * Entry 4: AQMQ ring 1
	 * Entry 5: ZQM ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
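	/*
	 * e.g. assuming 64 packet rings: entries 0-191 cover the rings
	 * (64 * NR_RING_VECTORS) and entry 192 (NON_RING_MSIX_BASE) is
	 * NPS_CORE_INT_ACTIVE.
	 */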
	nr_vecs = pci_msix_vec_count(pdev);
	if (nr_vecs < 0) {
		dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
		return nr_vecs;
	}

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
		return ret;
	}
	ndev->num_vecs = nr_vecs;

	ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
	if (!ndev->qvec) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

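	/*
	 * Only the NPS packet ring entry of each ring's 3-vector group gets
	 * an IRQ handler here; the AQMQ/ZQMQ entries are not wired up in
	 * this file.
	 */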
	/* request irqs for packet rings/ports */
	for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
		qvec = &ndev->qvec[i];

		qvec->ring = i / NR_RING_VECTORS;
		if (qvec->ring >= ndev->nr_queues)
			break;

		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
		if (ret) {
			dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
				qvec->ring);
			goto irq_fail;
		}
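		/* spread packet-ring IRQ affinity hints across the online CPUs */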
		cpu = qvec->ring % num_online_cpus();
		irq_set_affinity_hint(vec, get_cpu_mask(cpu));

		tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
			     (unsigned long)qvec);
		qvec->valid = true;
	}

	/* request irqs for non ring vectors */
	i = NON_RING_MSIX_BASE;
	qvec = &ndev->qvec[i];
	qvec->ndev = ndev;

	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
	/* get the vector number */
	vec = pci_irq_vector(pdev, i);
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
		goto irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

irq_fail:
	nitrox_unregister_interrupts(ndev);
	return ret;
}

void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

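	/*
	 * in SR-IOV mode only the single non-ring vector was set up
	 * (num_vecs == NR_NON_RING_VECTORS), so iov.msix.vector is the IRQ
	 * for every valid qvec below
	 */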
	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		vec = ndev->iov.msix.vector;
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_disable_msix(pdev);
}

int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int vec, cpu;
	int ret;

	/*
	 * only the non-ring vector, i.e. entry 192, is available
	 * to the PF in SR-IOV mode.
	 */
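	/*
	 * pci_enable_msix_exact() with a single struct msix_entry asks the
	 * PCI core to enable exactly this MSI-X table entry; on success
	 * iov.msix.vector holds the Linux IRQ number used below.
	 */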
	ndev->iov.msix.entry = NON_RING_MSIX_BASE;
	ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
	if (ret) {
		dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
			NON_RING_MSIX_BASE);
		return ret;
	}

	qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
	if (!qvec) {
		pci_disable_msix(pdev);
		return -ENOMEM;
	}
	qvec->ndev = ndev;

	ndev->qvec = qvec;
	ndev->num_vecs = NR_NON_RING_VECTORS;
	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
		 NON_RING_MSIX_BASE);

	vec = ndev->iov.msix.vector;
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
			NON_RING_MSIX_BASE);
		goto iov_irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

iov_irq_fail:
	nitrox_sriov_unregister_interrupts(ndev);
	return ret;
}