// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
#include "rvu_reg.h"

#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32

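/*
 * Helpers to program the DONE interrupt coalescing parameters in
 * CPT_LF_DONE_WAIT: the TIME_WAIT hold-off timer and the NUM_WAIT
 * completion-count threshold, per LF and across all allocated LFs.
 */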
static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
                                        int time_wait)
{
    union otx2_cptx_lf_done_wait done_wait;

    done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                  OTX2_CPT_LF_DONE_WAIT);
    done_wait.s.time_wait = time_wait;
    otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                     OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
    union otx2_cptx_lf_done_wait done_wait;

    done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                  OTX2_CPT_LF_DONE_WAIT);
    done_wait.s.num_wait = num_wait;
    otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                     OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
                                     int time_wait)
{
    int slot;

    for (slot = 0; slot < lfs->lfs_num; slot++)
        cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}

static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
    int slot;

    for (slot = 0; slot < lfs->lfs_num; slot++)
        cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}

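/*
 * Set the queue priority bit for an LF with a read-modify-write of
 * CPT_AF_LFX_CTL through the AF mailbox.
 */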
static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
    struct otx2_cptlfs_info *lfs = lf->lfs;
    union otx2_cptx_af_lf_ctrl lf_ctrl;
    int ret;

    ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                               CPT_AF_LFX_CTL(lf->slot),
                               &lf_ctrl.u, lfs->blkaddr);
    if (ret)
        return ret;

    lf_ctrl.s.pri = pri ? 1 : 0;

    ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                CPT_AF_LFX_CTL(lf->slot),
                                lf_ctrl.u, lfs->blkaddr);
    return ret;
}

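/*
 * Set the mask of engine groups whose engines may process instructions
 * queued on this LF, also via CPT_AF_LFX_CTL.
 */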
static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
                                   int eng_grps_mask)
{
    struct otx2_cptlfs_info *lfs = lf->lfs;
    union otx2_cptx_af_lf_ctrl lf_ctrl;
    int ret;

    ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                               CPT_AF_LFX_CTL(lf->slot),
                               &lf_ctrl.u, lfs->blkaddr);
    if (ret)
        return ret;

    lf_ctrl.s.grp = eng_grps_mask;

    ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                CPT_AF_LFX_CTL(lf->slot),
                                lf_ctrl.u, lfs->blkaddr);
    return ret;
}

static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
                                 int eng_grp_mask, int pri)
{
    int slot, ret = 0;

    for (slot = 0; slot < lfs->lfs_num; slot++) {
        ret = cptlf_set_pri(&lfs->lf[slot], pri);
        if (ret)
            return ret;

        ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
        if (ret)
            return ret;
    }
    return ret;
}

static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
    /* Disable instruction queues */
    otx2_cptlf_disable_iqueues(lfs);

    /* Set instruction queues base addresses */
    otx2_cptlf_set_iqueues_base_addr(lfs);

    /* Set instruction queues sizes */
    otx2_cptlf_set_iqueues_size(lfs);

    /* Set done interrupts time wait */
    cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

    /* Set done interrupts num wait */
    cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

    /* Enable instruction queues */
    otx2_cptlf_enable_iqueues(lfs);
}

static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
    /* Disable instruction queues */
    otx2_cptlf_disable_iqueues(lfs);
}

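/*
 * Enable (W1S) or disable (W1C) the miscellaneous error interrupts
 * (fault, hwerr, irde, nqerr, nwrp) on every LF.
 */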
static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
    union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
    u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
                       OTX2_CPT_LF_MISC_INT_ENA_W1C;
    int slot;

    irq_misc.s.fault = 0x1;
    irq_misc.s.hwerr = 0x1;
    irq_misc.s.irde = 0x1;
    irq_misc.s.nqerr = 0x1;
    irq_misc.s.nwrp = 0x1;

    for (slot = 0; slot < lfs->lfs_num; slot++)
        otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
                         irq_misc.u);
}

static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
{
    int slot;

    /* Enable done interrupts */
    for (slot = 0; slot < lfs->lfs_num; slot++)
        otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
                         OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
    /* Enable Misc interrupts */
    cptlf_set_misc_intrs(lfs, true);
}

static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
{
    int slot;

    for (slot = 0; slot < lfs->lfs_num; slot++)
        otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
                         OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
    cptlf_set_misc_intrs(lfs, false);
}

static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
    union otx2_cptx_lf_done irq_cnt;

    irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                OTX2_CPT_LF_DONE);
    return irq_cnt.s.done;
}

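/*
 * Misc interrupt handler: report the first pending error cause for this
 * LF and acknowledge it by writing the corresponding bit back to
 * CPT_LF_MISC_INT.
 */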
static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
    union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
    struct otx2_cptlf_info *lf = arg;
    struct device *dev;

    dev = &lf->lfs->pdev->dev;
    irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                                 OTX2_CPT_LF_MISC_INT);
    irq_misc_ack.u = 0x0;

    if (irq_misc.s.fault) {
        dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
                lf->slot);
        irq_misc_ack.s.fault = 0x1;

    } else if (irq_misc.s.hwerr) {
        dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
                lf->slot);
        irq_misc_ack.s.hwerr = 0x1;

    } else if (irq_misc.s.nwrp) {
        dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
                lf->slot);
        irq_misc_ack.s.nwrp = 0x1;

    } else if (irq_misc.s.irde) {
        dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
        irq_misc_ack.s.irde = 0x1;

    } else if (irq_misc.s.nqerr) {
        dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
        irq_misc_ack.s.nqerr = 0x1;

    } else {
        dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
        return IRQ_NONE;
    }

    /* Acknowledge interrupts */
    otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                     OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);

    return IRQ_HANDLED;
}

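/*
 * Done interrupt handler: acknowledge the number of completed
 * instructions through CPT_LF_DONE_ACK, rewrite CPT_LF_DONE_WAIT and
 * schedule the LF's tasklet to process the completions.
 */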
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
    union otx2_cptx_lf_done_wait done_wait;
    struct otx2_cptlf_info *lf = arg;
    int irq_cnt;

    /* Read the number of completed requests */
    irq_cnt = cptlf_read_done_cnt(lf);
    if (irq_cnt) {
        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
                                      lf->slot, OTX2_CPT_LF_DONE_WAIT);
        /* Acknowledge the number of completed requests */
        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                         OTX2_CPT_LF_DONE_ACK, irq_cnt);

        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
        if (unlikely(!lf->wqe)) {
            dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
                    lf->slot);
            return IRQ_NONE;
        }

        /* Schedule processing of completed requests */
        tasklet_hi_schedule(&lf->wqe->work);
    }
    return IRQ_HANDLED;
}

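/* Free every registered LF IRQ and disable the done and misc interrupts. */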
void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
{
    int i, offs, vector;

    for (i = 0; i < lfs->lfs_num; i++) {
        for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
            if (!lfs->lf[i].is_irq_reg[offs])
                continue;

            vector = pci_irq_vector(lfs->pdev,
                                    lfs->lf[i].msix_offset + offs);
            free_irq(vector, &lfs->lf[i]);
            lfs->lf[i].is_irq_reg[offs] = false;
        }
    }
    cptlf_disable_intrs(lfs);
}

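/*
 * Request the MSI-X vector at the given offset for one LF and record
 * that the IRQ has been registered.
 */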
static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
                                        int lf_num, int irq_offset,
                                        irq_handler_t handler)
{
    int ret, vector;

    vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
                            irq_offset);
    ret = request_irq(vector, handler, 0,
                      lfs->lf[lf_num].irq_name[irq_offset],
                      &lfs->lf[lf_num]);
    if (ret)
        return ret;

    lfs->lf[lf_num].is_irq_reg[irq_offset] = true;

    return ret;
}

int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
{
    int irq_offs, ret, i;

    for (i = 0; i < lfs->lfs_num; i++) {
        irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
        snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
        ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
                                           cptlf_misc_intr_handler);
        if (ret)
            goto free_irq;

        irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
        snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
                 i);
        ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
                                           cptlf_done_intr_handler);
        if (ret)
            goto free_irq;
    }
    cptlf_enable_intrs(lfs);
    return 0;

free_irq:
    otx2_cptlf_unregister_interrupts(lfs);
    return ret;
}

void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
    int slot, offs;

    for (slot = 0; slot < lfs->lfs_num; slot++) {
        for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
            irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
                                  lfs->lf[slot].msix_offset +
                                  offs), NULL);
        free_cpumask_var(lfs->lf[slot].affinity_mask);
    }
}

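/*
 * Give each LF's interrupt vectors an affinity hint for a CPU spread
 * across the device's NUMA node, one cpumask per LF.
 */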
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
    struct otx2_cptlf_info *lf = lfs->lf;
    int slot, offs, ret;

    for (slot = 0; slot < lfs->lfs_num; slot++) {
        if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
            dev_err(&lfs->pdev->dev,
                    "cpumask allocation failed for LF %d", slot);
            ret = -ENOMEM;
            goto free_affinity_mask;
        }

        cpumask_set_cpu(cpumask_local_spread(slot,
                        dev_to_node(&lfs->pdev->dev)),
                        lf[slot].affinity_mask);

        for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
            ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
                                        lf[slot].msix_offset + offs),
                                        lf[slot].affinity_mask);
            if (ret)
                goto free_affinity_mask;
        }
    }
    return 0;

free_affinity_mask:
    otx2_cptlf_free_irqs_affinity(lfs);
    return ret;
}

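/*
 * Initialize the requested number of LFs: attach them through the AF
 * mailbox, allocate and program their instruction queues, and configure
 * the engine group mask and queue priority for each LF.
 */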
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
                    int lfs_num)
{
    int slot, ret;

    if (!lfs->pdev || !lfs->reg_base)
        return -EINVAL;

    lfs->lfs_num = lfs_num;
    for (slot = 0; slot < lfs->lfs_num; slot++) {
        lfs->lf[slot].lfs = lfs;
        lfs->lf[slot].slot = slot;
        if (lfs->lmt_base)
            lfs->lf[slot].lmtline = lfs->lmt_base +
                                    (slot * LMTLINE_SIZE);
        else
            lfs->lf[slot].lmtline = lfs->reg_base +
                OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
                                         OTX2_CPT_LMT_LF_LMTLINEX(0));

        lfs->lf[slot].ioreg = lfs->reg_base +
            OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
                                     OTX2_CPT_LF_NQX(0));
    }
    /* Send request to attach LFs */
    ret = otx2_cpt_attach_rscrs_msg(lfs);
    if (ret)
        goto clear_lfs_num;

    ret = otx2_cpt_alloc_instruction_queues(lfs);
    if (ret) {
        dev_err(&lfs->pdev->dev,
                "Allocating instruction queues failed\n");
        goto detach_rsrcs;
    }
    cptlf_hw_init(lfs);
    /*
     * Allow each LF to execute requests destined to the selected engine
     * groups and set the queue priority of each LF
     */
    ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
    if (ret)
        goto free_iq;

    return 0;

free_iq:
    otx2_cpt_free_instruction_queues(lfs);
    cptlf_hw_cleanup(lfs);
detach_rsrcs:
    otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
    lfs->lfs_num = 0;
    return ret;
}

void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
    lfs->lfs_num = 0;
    /* Cleanup LFs hardware side */
    cptlf_hw_cleanup(lfs);
    /* Send request to detach LFs */
    otx2_cpt_detach_rsrcs_msg(lfs);
}