// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

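/*
 * Clear any stale VF-PF mailbox interrupts and enable them for the given
 * number of VFs. VFs 0-63 are served by interrupt register set 0, VFs
 * 64-127 by set 1.
 */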
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                    int num_vfs)
{
    int ena_bits;

    /* Clear any pending interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

    /* Enable VF interrupts for VFs from 0 to 63 */
    ena_bits = ((num_vfs - 1) % 64);
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
             GENMASK_ULL(ena_bits, 0));

    if (num_vfs > 64) {
        /* Enable VF interrupts for VFs from 64 to 127 */
        ena_bits = num_vfs - 64 - 1;
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
                GENMASK_ULL(ena_bits, 0));
    }
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                     int num_vfs)
{
    int vector;

    /* Disable VF-PF interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
    /* Clear any pending interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

    vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
    free_irq(vector, cptpf);

    if (num_vfs > 64) {
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
        free_irq(vector, cptpf);
    }
}

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                     int num_vfs)
{
    /* Clear FLR interrupt if any */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
             INTR_MASK(num_vfs));

    /* Enable VF FLR interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
    /* Clear ME interrupt if any */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
             INTR_MASK(num_vfs));
    /* Enable VF ME interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

    if (num_vfs <= 64)
        return;

    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
             INTR_MASK(num_vfs - 64));
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
             INTR_MASK(num_vfs - 64));
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                       int num_vfs)
{
    int vector;

    /* Disable VF FLR interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
    vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
    free_irq(vector, cptpf);

    /* Disable VF ME interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
    vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
    free_irq(vector, cptpf);

    if (num_vfs <= 64)
        return;

    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
    vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
    free_irq(vector, cptpf);

    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
             RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
    vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
    free_irq(vector, cptpf);
}

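/*
 * Per-VF FLR work handler: forward a MBOX_MSG_VF_FLR notification to the
 * AF and, once the AF has processed it, clear the VF's transaction pending
 * bit and re-enable its FLR interrupt (which was masked in the hard IRQ
 * handler below).
 */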
static void cptpf_flr_wq_handler(struct work_struct *work)
{
    struct cptpf_flr_work *flr_work;
    struct otx2_cptpf_dev *pf;
    struct mbox_msghdr *req;
    struct otx2_mbox *mbox;
    int vf, reg = 0;

    flr_work = container_of(work, struct cptpf_flr_work, work);
    pf = flr_work->pf;
    mbox = &pf->afpf_mbox;

    vf = flr_work - pf->flr_work;

    mutex_lock(&pf->lock);
    req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
                      sizeof(struct msg_rsp));
    if (!req) {
        mutex_unlock(&pf->lock);
        return;
    }

    req->sig = OTX2_MBOX_REQ_SIG;
    req->id = MBOX_MSG_VF_FLR;
    req->pcifunc &= RVU_PFVF_FUNC_MASK;
    req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

    otx2_cpt_send_mbox_msg(mbox, pf->pdev);
    if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
        if (vf >= 64) {
            reg = 1;
            vf = vf - 64;
        }
        /* Clear transaction pending register */
        otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
        otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
    }
    mutex_unlock(&pf->lock);
}

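/*
 * VF FLR interrupt handler: for every VF with a pending FLR, queue its
 * FLR work item, then clear and mask the interrupt until the work handler
 * has notified the AF.
 */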
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
    int reg, dev, vf, start_vf, num_reg = 1;
    struct otx2_cptpf_dev *cptpf = arg;
    u64 intr;

    if (cptpf->max_vfs > 64)
        num_reg = 2;

    for (reg = 0; reg < num_reg; reg++) {
        intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                       RVU_PF_VFFLR_INTX(reg));
        if (!intr)
            continue;
        start_vf = 64 * reg;
        for (vf = 0; vf < 64; vf++) {
            if (!(intr & BIT_ULL(vf)))
                continue;
            dev = vf + start_vf;
            queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
            /* Clear interrupt */
            otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                     RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
            /* Disable the interrupt */
            otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                     RVU_PF_VFFLR_INT_ENA_W1CX(reg),
                     BIT_ULL(vf));
        }
    }
    return IRQ_HANDLED;
}

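/*
 * VF ME (master enable) interrupt handler: clear the VF's transaction
 * pending bit and acknowledge the interrupt.
 */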
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
    struct otx2_cptpf_dev *cptpf = arg;
    int reg, vf, num_reg = 1;
    u64 intr;

    if (cptpf->max_vfs > 64)
        num_reg = 2;

    for (reg = 0; reg < num_reg; reg++) {
        intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                       RVU_PF_VFME_INTX(reg));
        if (!intr)
            continue;
        for (vf = 0; vf < 64; vf++) {
            if (!(intr & BIT_ULL(vf)))
                continue;
            otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                     RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
            /* Clear interrupt */
            otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                     RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
        }
    }
    return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
                       int num_vfs)
{
    cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
    cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

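/*
 * Request the VF-PF mailbox, VF FLR and VF ME interrupt vectors (the
 * second set of vectors is only needed when more than 64 VFs are enabled)
 * and then enable the interrupts. On failure, all vectors requested so
 * far are released again.
 */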
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
    struct pci_dev *pdev = cptpf->pdev;
    struct device *dev = &pdev->dev;
    int ret, vector;

    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
    /* Register VF-PF mailbox interrupt handler */
    ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
              cptpf);
    if (ret) {
        dev_err(dev,
            "IRQ registration failed for PFVF mbox0 irq\n");
        return ret;
    }
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
    /* Register VF FLR interrupt handler */
    ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
    if (ret) {
        dev_err(dev,
            "IRQ registration failed for VFFLR0 irq\n");
        goto free_mbox0_irq;
    }
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
    /* Register VF ME interrupt handler */
    ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
    if (ret) {
        dev_err(dev,
            "IRQ registration failed for VFME0 irq\n");
        goto free_flr0_irq;
    }

    if (num_vfs > 64) {
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
        ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
                  "CPTVFPF Mbox1", cptpf);
        if (ret) {
            dev_err(dev,
                "IRQ registration failed for PFVF mbox1 irq\n");
            goto free_me0_irq;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
        /* Register VF FLR interrupt handler */
        ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
                  cptpf);
        if (ret) {
            dev_err(dev,
                "IRQ registration failed for VFFLR1 irq\n");
            goto free_mbox1_irq;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
        /* Register VF ME interrupt handler */
        ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
                  cptpf);
        if (ret) {
            dev_err(dev,
                "IRQ registration failed for VFME1 irq\n");
            goto free_flr1_irq;
        }
    }
    cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
    cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

    return 0;

free_flr1_irq:
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
    free_irq(vector, cptpf);
free_mbox1_irq:
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
    free_irq(vector, cptpf);
free_me0_irq:
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
    free_irq(vector, cptpf);
free_flr0_irq:
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
    free_irq(vector, cptpf);
free_mbox0_irq:
    vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
    free_irq(vector, cptpf);
    return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
    if (!pf->flr_wq)
        return;
    destroy_workqueue(pf->flr_wq);
    pf->flr_wq = NULL;
    kfree(pf->flr_work);
}

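/*
 * Allocate an ordered workqueue and one work item per VF for handling
 * VF function level resets (FLR).
 */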
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
    int vf;

    cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
    if (!cptpf->flr_wq)
        return -ENOMEM;

    cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
                  GFP_KERNEL);
    if (!cptpf->flr_work)
        goto destroy_wq;

    for (vf = 0; vf < num_vfs; vf++) {
        cptpf->flr_work[vf].pf = cptpf;
        INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
    }
    return 0;

destroy_wq:
    destroy_workqueue(cptpf->flr_wq);
    return -ENOMEM;
}

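/*
 * Set up the VF-PF mailbox: allocate the mailbox workqueue, map the
 * mailbox region configured by the AF (RVU_PF_VF_MBOX_ADDR on CN10K,
 * RVU_PF_VF_BAR4_ADDR otherwise) and initialize per-VF mailbox work.
 */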
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
    struct device *dev = &cptpf->pdev->dev;
    u64 vfpf_mbox_base;
    int err, i;

    cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
                          WQ_UNBOUND | WQ_HIGHPRI |
                          WQ_MEM_RECLAIM, 1);
    if (!cptpf->vfpf_mbox_wq)
        return -ENOMEM;

    /* Map VF-PF mailbox memory */
    if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
        vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
    else
        vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

    if (!vfpf_mbox_base) {
        dev_err(dev, "VF-PF mailbox address not configured\n");
        err = -ENOMEM;
        goto free_wqe;
    }
    cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
                        MBOX_SIZE * cptpf->max_vfs);
    if (!cptpf->vfpf_mbox_base) {
        dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
        err = -ENOMEM;
        goto free_wqe;
    }
    err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
                 cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
                 num_vfs);
    if (err)
        goto free_wqe;

    for (i = 0; i < num_vfs; i++) {
        cptpf->vf[i].vf_id = i;
        cptpf->vf[i].cptpf = cptpf;
        cptpf->vf[i].intr_idx = i % 64;
        INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
              otx2_cptpf_vfpf_mbox_handler);
    }
    return 0;

free_wqe:
    destroy_workqueue(cptpf->vfpf_mbox_wq);
    return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
    destroy_workqueue(cptpf->vfpf_mbox_wq);
    otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
    /* Disable AF-PF interrupt */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
             0x1ULL);
    /* Clear interrupt if any */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

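/*
 * Register the AF-PF mailbox interrupt, enable it and send a READY
 * message to the AF. If the AF does not respond, the probe is deferred.
 */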
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
    struct pci_dev *pdev = cptpf->pdev;
    struct device *dev = &pdev->dev;
    int ret, irq;

    irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
    /* Register AF-PF mailbox interrupt handler */
    ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
                   "CPTAFPF Mbox", cptpf);
    if (ret) {
        dev_err(dev,
            "IRQ registration failed for PFAF mbox irq\n");
        return ret;
    }
    /* Clear interrupt if any, to avoid spurious interrupts */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
    /* Enable AF-PF interrupt */
    otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
             0x1ULL);

    ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
    if (ret) {
        dev_warn(dev,
             "AF not responding to mailbox, deferring probe\n");
        cptpf_disable_afpf_mbox_intr(cptpf);
        return -EPROBE_DEFER;
    }
    return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
    struct pci_dev *pdev = cptpf->pdev;
    resource_size_t offset;
    int err;

    cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
                          WQ_UNBOUND | WQ_HIGHPRI |
                          WQ_MEM_RECLAIM, 1);
    if (!cptpf->afpf_mbox_wq)
        return -ENOMEM;

    offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
    /* Map AF-PF mailbox memory */
    cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
    if (!cptpf->afpf_mbox_base) {
        dev_err(&pdev->dev, "Unable to map BAR4\n");
        err = -ENOMEM;
        goto error;
    }

    err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
                 pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
    if (err)
        goto error;

    INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
    mutex_init(&cptpf->lock);
    return 0;

error:
    destroy_workqueue(cptpf->afpf_mbox_wq);
    return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
    destroy_workqueue(cptpf->afpf_mbox_wq);
    otx2_mbox_destroy(&cptpf->afpf_mbox);
}

static ssize_t kvf_limits_show(struct device *dev,
                   struct device_attribute *attr, char *buf)
{
    struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

    return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

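/*
 * kvf_limits sysfs store: set the limit on the number of CPT LFs used for
 * kernel crypto requests; accepted values range from 1 to the number of
 * online CPUs.
 */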
static ssize_t kvf_limits_store(struct device *dev,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
    struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
    int lfs_num;
    int ret;

    ret = kstrtoint(buf, 0, &lfs_num);
    if (ret)
        return ret;
    if (lfs_num < 1 || lfs_num > num_online_cpus()) {
        dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
            lfs_num, num_online_cpus());
        return -EINVAL;
    }
    cptpf->kvf_limits = lfs_num;

    return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
    &dev_attr_kvf_limits.attr,
    NULL
};

static const struct attribute_group cptpf_sysfs_group = {
    .attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
    u64 rev;

    rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                  RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
    rev = (rev >> 12) & 0xFF;
    /*
     * Check if AF has setup revision for RVUM block, otherwise
     * driver probe should be deferred until AF driver comes up
     */
    if (!rev) {
        dev_warn(&cptpf->pdev->dev,
             "AF is not initialized, deferring probe\n");
        return -EPROBE_DEFER;
    }
    return 0;
}

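/*
 * Reset one CPT block: request the reset through CPT_AF_BLK_RST via the
 * AF mailbox and poll the register until the busy bit (bit 63) clears,
 * giving up after roughly 100-200 ms.
 */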
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
    int timeout = 10, ret;
    u64 reg = 0;

    ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                    CPT_AF_BLK_RST, 0x1, blkaddr);
    if (ret)
        return ret;

    do {
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                       CPT_AF_BLK_RST, &reg, blkaddr);
        if (ret)
            return ret;

        if (!((reg >> 63) & 0x1))
            break;

        usleep_range(10000, 20000);
        if (timeout-- < 0)
            return -EBUSY;
    } while (1);

    return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
    int ret = 0;

    if (cptpf->has_cpt1) {
        ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
        if (ret)
            return ret;
    }
    return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
    u64 cfg;

    cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                  RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
    if (cfg & BIT_ULL(11))
        cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
    union otx2_cptx_af_constants1 af_cnsts1 = {0};
    int ret = 0;

    /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
    cptpf_check_block_implemented(cptpf);
    /* Reset the CPT PF device */
    ret = cptpf_device_reset(cptpf);
    if (ret)
        return ret;

    /* Get number of SE, IE and AE engines */
    ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                   CPT_AF_CONSTANTS1, &af_cnsts1.u,
                   BLKADDR_CPT0);
    if (ret)
        return ret;

    cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
    cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
    cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

    /* Disable all cores */
    ret = otx2_cpt_disable_all_cores(cptpf);

    return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
    struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
    int num_vfs = pci_num_vf(pdev);

    if (!num_vfs)
        return 0;

    pci_disable_sriov(pdev);
    cptpf_unregister_vfpf_intr(cptpf, num_vfs);
    cptpf_flr_wq_destroy(cptpf);
    cptpf_vfpf_mbox_destroy(cptpf);
    module_put(THIS_MODULE);
    cptpf->enabled_vfs = 0;

    return 0;
}

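/*
 * Bring up SR-IOV: initialize the VF-PF mailbox and FLR workqueue,
 * register the VF interrupts, discover engine capabilities, create the
 * engine groups and finally enable the VFs in PCI.
 */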
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
    struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
    int ret;

    /* Initialize VF<=>PF mailbox */
    ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
    if (ret)
        return ret;

    ret = cptpf_flr_wq_init(cptpf, num_vfs);
    if (ret)
        goto destroy_mbox;
    /* Register VF<=>PF mailbox interrupt */
    ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
    if (ret)
        goto destroy_flr;

    /* Get CPT HW capabilities using LOAD_FVC operation. */
    ret = otx2_cpt_discover_eng_capabilities(cptpf);
    if (ret)
        goto disable_intr;

    ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
    if (ret)
        goto disable_intr;

    cptpf->enabled_vfs = num_vfs;
    ret = pci_enable_sriov(pdev, num_vfs);
    if (ret)
        goto disable_intr;

    dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

    try_module_get(THIS_MODULE);
    return num_vfs;

disable_intr:
    cptpf_unregister_vfpf_intr(cptpf, num_vfs);
    cptpf->enabled_vfs = 0;
destroy_flr:
    cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
    cptpf_vfpf_mbox_destroy(cptpf);
    return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
    if (num_vfs > 0)
        return cptpf_sriov_enable(pdev, num_vfs);

    return cptpf_sriov_disable(pdev);
}

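/*
 * PF probe: map the configuration BAR, make sure the AF is up, allocate
 * MSI-X vectors, initialize the AF-PF mailbox and its interrupt, reset
 * and initialize the CPT block(s), set up engine groups and register the
 * sysfs and devlink interfaces.
 */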
static int otx2_cptpf_probe(struct pci_dev *pdev,
                const struct pci_device_id *ent)
{
    struct device *dev = &pdev->dev;
    struct otx2_cptpf_dev *cptpf;
    int err;

    cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
    if (!cptpf)
        return -ENOMEM;

    err = pcim_enable_device(pdev);
    if (err) {
        dev_err(dev, "Failed to enable PCI device\n");
        goto clear_drvdata;
    }

    err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
    if (err) {
        dev_err(dev, "Unable to get usable DMA configuration\n");
        goto clear_drvdata;
    }
    /* Map PF's configuration registers */
    err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
                         OTX2_CPT_DRV_NAME);
    if (err) {
        dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
        goto clear_drvdata;
    }
    pci_set_master(pdev);
    pci_set_drvdata(pdev, cptpf);
    cptpf->pdev = pdev;

    cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

    /* Check if AF driver is up, otherwise defer probe */
    err = cpt_is_pf_usable(cptpf);
    if (err)
        goto clear_drvdata;

    err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
                    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
    if (err < 0) {
        dev_err(dev, "Request for %d msix vectors failed\n",
            RVU_PF_INT_VEC_CNT);
        goto clear_drvdata;
    }
    otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
    /* Initialize AF-PF mailbox */
    err = cptpf_afpf_mbox_init(cptpf);
    if (err)
        goto clear_drvdata;
    /* Register mailbox interrupt */
    err = cptpf_register_afpf_mbox_intr(cptpf);
    if (err)
        goto destroy_afpf_mbox;

    cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

    err = cn10k_cptpf_lmtst_init(cptpf);
    if (err)
        goto unregister_intr;

    /* Initialize CPT PF device */
    err = cptpf_device_init(cptpf);
    if (err)
        goto unregister_intr;

    /* Initialize engine groups */
    err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
    if (err)
        goto unregister_intr;

    err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
    if (err)
        goto cleanup_eng_grps;

    err = otx2_cpt_register_dl(cptpf);
    if (err)
        goto sysfs_grp_del;

    return 0;

sysfs_grp_del:
    sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
    otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
    cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
    cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
    pci_set_drvdata(pdev, NULL);
    return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
    struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

    if (!cptpf)
        return;

    cptpf_sriov_disable(pdev);
    otx2_cpt_unregister_dl(cptpf);
    /* Delete sysfs entry created for kernel VF limits */
    sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
    /* Cleanup engine groups */
    otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
    /* Disable AF-PF mailbox interrupt */
    cptpf_disable_afpf_mbox_intr(cptpf);
    /* Destroy AF-PF mbox */
    cptpf_afpf_mbox_destroy(cptpf);
    pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
    { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
    { 0, }  /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
    .name = OTX2_CPT_DRV_NAME,
    .id_table = otx2_cpt_id_table,
    .probe = otx2_cptpf_probe,
    .remove = otx2_cptpf_remove,
    .sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);