// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
#include "cn10k_cpt.h"
#include <rvu_reg.h>

#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"

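/*
 * Helpers to toggle the PF-VF mailbox interrupt on this VF: any pending
 * interrupt in OTX2_RVU_VF_INT is cleared with a write of 1, and the
 * interrupt is enabled/disabled through the W1S/W1C enable registers.
 */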
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
    /* Clear interrupt if any */
    otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
             0x1ULL);

    /* Enable PF-VF interrupt */
    otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
             OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}

static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
    /* Disable PF-VF interrupt */
    otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
             OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

    /* Clear interrupt if any */
    otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
             0x1ULL);
}

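/*
 * Allocate the VF's MSI-X vectors, hook up the PF<=>VF mailbox interrupt
 * handler and send the READY message to the PF. If the PF does not
 * answer, the mailbox interrupt is disabled again and probing is
 * deferred.
 */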
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
    int ret, irq;
    int num_vec;

    num_vec = pci_msix_vec_count(cptvf->pdev);
    if (num_vec <= 0)
        return -EINVAL;

    /* Enable MSI-X */
    ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
                    PCI_IRQ_MSIX);
    if (ret < 0) {
        dev_err(&cptvf->pdev->dev,
            "Request for %d msix vectors failed\n", num_vec);
        return ret;
    }
    irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
    /* Register VF<=>PF mailbox interrupt handler */
    ret = devm_request_irq(&cptvf->pdev->dev, irq,
                   otx2_cptvf_pfvf_mbox_intr, 0,
                   "CPTPFVF Mbox", cptvf);
    if (ret)
        return ret;
    /* Enable PF-VF mailbox interrupts */
    cptvf_enable_pfvf_mbox_intrs(cptvf);

    ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
    if (ret) {
        dev_warn(&cptvf->pdev->dev,
             "PF not responding to mailbox, deferring probe\n");
        cptvf_disable_pfvf_mbox_intrs(cptvf);
        return -EPROBE_DEFER;
    }
    return 0;
}

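/*
 * Set up the PF-VF mailbox: allocate its workqueue, locate the mailbox
 * region (inside the BAR2 register space on CN10K, a dedicated mailbox
 * BAR otherwise), initialize the mailbox in VF->PF direction and the
 * mailbox bounce buffer (otx2_cpt_mbox_bbuf_init).
 */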
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
    struct pci_dev *pdev = cptvf->pdev;
    resource_size_t offset, size;
    int ret;

    cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
                          WQ_UNBOUND | WQ_HIGHPRI |
                          WQ_MEM_RECLAIM, 1);
    if (!cptvf->pfvf_mbox_wq)
        return -ENOMEM;

    if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
        /* For cn10k platform, VF mailbox region is in its BAR2
         * register space
         */
        cptvf->pfvf_mbox_base = cptvf->reg_base +
                    CN10K_CPT_VF_MBOX_REGION;
    } else {
        offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
        size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
        /* Map PF-VF mailbox memory */
        cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
                            size);
        if (!cptvf->pfvf_mbox_base) {
            dev_err(&pdev->dev, "Unable to map BAR4\n");
            ret = -ENOMEM;
            goto free_wqe;
        }
    }

    ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
                 pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
    if (ret)
        goto free_wqe;

    ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
    if (ret)
        goto destroy_mbox;

    INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
    return 0;

destroy_mbox:
    otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
    destroy_workqueue(cptvf->pfvf_mbox_wq);
    return ret;
}

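/* Undo cptvf_pfvf_mbox_init(): release the workqueue and the mailbox. */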
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
    destroy_workqueue(cptvf->pfvf_mbox_wq);
    otx2_mbox_destroy(&cptvf->pfvf_mbox);
}

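/*
 * Tasklet callback: run completion post-processing for the CPT LF whose
 * otx2_cptlf_wqe was passed as the tasklet data.
 */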
static void cptlf_work_handler(unsigned long data)
{
    otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}

static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
    int i;

    for (i = 0; i < lfs->lfs_num; i++) {
        if (!lfs->lf[i].wqe)
            continue;

        tasklet_kill(&lfs->lf[i].wqe->work);
        kfree(lfs->lf[i].wqe);
        lfs->lf[i].wqe = NULL;
    }
}

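/*
 * Allocate one otx2_cptlf_wqe per LF and bind it to a tasklet running
 * cptlf_work_handler(); on allocation failure, tasklets set up so far
 * are killed and their wqe structures freed.
 */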
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
    struct otx2_cptlf_wqe *wqe;
    int i, ret = 0;

    for (i = 0; i < lfs->lfs_num; i++) {
        wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
        if (!wqe) {
            ret = -ENOMEM;
            goto cleanup_tasklet;
        }

        tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
        wqe->lfs = lfs;
        wqe->lf_num = i;
        lfs->lf[i].wqe = wqe;
    }
    return 0;

cleanup_tasklet:
    cleanup_tasklet_work(lfs);
    return ret;
}

static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
    int i;

    for (i = 0; i < lfs->lfs_num; i++) {
        kfree(lfs->lf[i].pqueue.head);
        lfs->lf[i].pqueue.head = NULL;
    }
}

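/*
 * Allocate a pending queue of OTX2_CPT_INST_QLEN_MSGS entries for every
 * LF and initialize its lock; queues allocated so far are freed on
 * failure.
 */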
static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
    int size, ret, i;

    if (!lfs->lfs_num)
        return -EINVAL;

    for (i = 0; i < lfs->lfs_num; i++) {
        lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
        size = lfs->lf[i].pqueue.qlen *
               sizeof(struct otx2_cpt_pending_entry);

        lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
        if (!lfs->lf[i].pqueue.head) {
            ret = -ENOMEM;
            goto error;
        }

        /* Initialize spin lock */
        spin_lock_init(&lfs->lf[i].pqueue.lock);
    }
    return 0;

error:
    free_pending_queues(lfs);
    return ret;
}

static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
    cleanup_tasklet_work(lfs);
    free_pending_queues(lfs);
}

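/* Software-side LF setup: pending queues first, then per-LF tasklets. */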
static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
    int ret;

    ret = alloc_pending_queues(lfs);
    if (ret) {
        dev_err(&lfs->pdev->dev,
            "Allocating pending queues failed\n");
        return ret;
    }
    ret = init_tasklet_work(lfs);
    if (ret) {
        dev_err(&lfs->pdev->dev,
            "Tasklet work init failed\n");
        goto pending_queues_free;
    }
    return 0;

pending_queues_free:
    free_pending_queues(lfs);
    return ret;
}

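/*
 * Tear down the LFs: mark them as in reset, drop IRQ affinity, disable
 * the instruction queues, unregister crypto algorithms and LF interrupts,
 * release the software state and ask the PF to detach the LF resources.
 */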
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
    atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);

    /* Remove interrupts affinity */
    otx2_cptlf_free_irqs_affinity(lfs);
    /* Disable instruction queue */
    otx2_cptlf_disable_iqueues(lfs);
    /* Unregister crypto algorithms */
    otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
    /* Unregister LFs interrupts */
    otx2_cptlf_unregister_interrupts(lfs);
    /* Cleanup LFs software side */
    lf_sw_cleanup(lfs);
    /* Send request to detach LFs */
    otx2_cpt_detach_rsrcs_msg(lfs);
}

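/*
 * Bring up the CPT LFs: query the symmetric-crypto engine group and the
 * kernel VF limits from the PF, initialize the LFs (one per online CPU
 * unless kvf_limits says otherwise), set up software state and
 * interrupts, then register the crypto algorithms.
 */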
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
    struct otx2_cptlfs_info *lfs = &cptvf->lfs;
    struct device *dev = &cptvf->pdev->dev;
    int ret, lfs_num;
    u8 eng_grp_msk;

    /* Get engine group number for symmetric crypto */
    cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
    ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
    if (ret)
        return ret;

    if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
        dev_err(dev, "Engine group for kernel crypto not available\n");
        ret = -ENOENT;
        return ret;
    }
    eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

    ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
    if (ret)
        return ret;

    lfs->reg_base = cptvf->reg_base;
    lfs->pdev = cptvf->pdev;
    lfs->mbox = &cptvf->pfvf_mbox;

    lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
          num_online_cpus();
    ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
                  lfs_num);
    if (ret)
        return ret;

    /* Get msix offsets for attached LFs */
    ret = otx2_cpt_msix_offset_msg(lfs);
    if (ret)
        goto cleanup_lf;

    /* Initialize LFs software side */
    ret = lf_sw_init(lfs);
    if (ret)
        goto cleanup_lf;

    /* Register LFs interrupts */
    ret = otx2_cptlf_register_interrupts(lfs);
    if (ret)
        goto cleanup_lf_sw;

    /* Set interrupts affinity */
    ret = otx2_cptlf_set_irqs_affinity(lfs);
    if (ret)
        goto unregister_intr;

    atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
    /* Register crypto algorithms */
    ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
    if (ret) {
        dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
        goto disable_irqs;
    }
    return 0;

disable_irqs:
    otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
    otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
    lf_sw_cleanup(lfs);
cleanup_lf:
    otx2_cptlf_shutdown(lfs);

    return ret;
}

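/*
 * PCI probe: enable the device, set a 48-bit DMA mask, map the VF
 * configuration BAR, set up CN10K LMTST and the PF-VF mailbox, register
 * interrupts and finally bring up the CPT LFs.
 */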
static int otx2_cptvf_probe(struct pci_dev *pdev,
                const struct pci_device_id *ent)
{
    struct device *dev = &pdev->dev;
    struct otx2_cptvf_dev *cptvf;
    int ret;

    cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
    if (!cptvf)
        return -ENOMEM;

    ret = pcim_enable_device(pdev);
    if (ret) {
        dev_err(dev, "Failed to enable PCI device\n");
        goto clear_drvdata;
    }

    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
    if (ret) {
        dev_err(dev, "Unable to get usable DMA configuration\n");
        goto clear_drvdata;
    }
    /* Map VF's configuration registers */
    ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
                         OTX2_CPTVF_DRV_NAME);
    if (ret) {
        dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
        goto clear_drvdata;
    }
    pci_set_master(pdev);
    pci_set_drvdata(pdev, cptvf);
    cptvf->pdev = pdev;

    cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

    otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);

    ret = cn10k_cptvf_lmtst_init(cptvf);
    if (ret)
        goto clear_drvdata;

    /* Initialize PF<=>VF mailbox */
    ret = cptvf_pfvf_mbox_init(cptvf);
    if (ret)
        goto clear_drvdata;

    /* Register interrupts */
    ret = cptvf_register_interrupts(cptvf);
    if (ret)
        goto destroy_pfvf_mbox;

    /* Initialize CPT LFs */
    ret = cptvf_lf_init(cptvf);
    if (ret)
        goto unregister_interrupts;

    return 0;

unregister_interrupts:
    cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
    cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
    pci_set_drvdata(pdev, NULL);

    return ret;
}

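/*
 * PCI remove: shut down the LFs, then disable the PF-VF mailbox
 * interrupt and destroy the mailbox.
 */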
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
    struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

    if (!cptvf) {
        dev_err(&pdev->dev, "Invalid CPT VF device.\n");
        return;
    }
    cptvf_lf_shutdown(&cptvf->lfs);
    /* Disable PF-VF mailbox interrupt */
    cptvf_disable_pfvf_mbox_intrs(cptvf);
    /* Destroy PF-VF mbox */
    cptvf_pfvf_mbox_destroy(cptvf);
    pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
    {PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
    {PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
    { 0, }  /* end of table */
};

static struct pci_driver otx2_cptvf_pci_driver = {
    .name = OTX2_CPTVF_DRV_NAME,
    .id_table = otx2_cptvf_id_table,
    .probe = otx2_cptvf_probe,
    .remove = otx2_cptvf_remove,
};

module_pci_driver(otx2_cptvf_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);