// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include "cptpf.h"

#define DRV_NAME    "thunder-cpt"
#define DRV_VERSION "1.0"

static u32 num_vfs = 4; /* Default: 4 VFs enabled */
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (4-16)");
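/*
 * Note: num_vfs is validated in cpt_probe(); values outside the 4-16 range
 * fall back to the default of 4 before SR-IOV is configured.
 */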

/*
 * Disable cores specified by coremask
 */
static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
                  u8 type, u8 grp)
{
    u64 pf_exe_ctl;
    u64 busy;
    u32 timeout = 100;
    u64 grpmask = 0;
    struct device *dev = &cpt->pdev->dev;

    if (type == AE_TYPES)
        coremask = (coremask << cpt->max_se_cores);

    /* Disengage the cores from groups */
    grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
    cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
            (grpmask & ~coremask));
    udelay(CSR_DELAY);
    busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
    while (busy & coremask) {
        dev_err(dev, "Cores still busy %llx\n", coremask);
        busy = cpt_read_csr64(cpt->reg_base,
                      CPTX_PF_EXEC_BUSY(0));
        if (!timeout--)
            break;

        udelay(CSR_DELAY);
    }

    /* Disable the cores */
    pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
    cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
            (pf_exe_ctl & ~coremask));
    udelay(CSR_DELAY);
}

/*
 * Enable cores specified by coremask
 */
static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
                 u8 type)
{
    u64 pf_exe_ctl;

    if (type == AE_TYPES)
        coremask = (coremask << cpt->max_se_cores);

    pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
    cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
            (pf_exe_ctl | coremask));
    udelay(CSR_DELAY);
}

static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
                u64 coremask, u8 type)
{
    u64 pf_gx_en = 0;

    if (type == AE_TYPES)
        coremask = (coremask << cpt->max_se_cores);

    pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
    cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
            (pf_gx_en | coremask));
    udelay(CSR_DELAY);
}

static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
{
    /* Disable mbox(0) interrupts for all VFs */
    cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
}

static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
{
    /* Disable ECC(0) interrupts for all VFs */
    cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
}

static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
{
    /* Disable exec interrupts for all VFs */
    cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
}

static void cpt_disable_all_interrupts(struct cpt_device *cpt)
{
    cpt_disable_mbox_interrupts(cpt);
    cpt_disable_ecc_interrupts(cpt);
    cpt_disable_exec_interrupts(cpt);
}

static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
{
    /* Enable mbox(0) interrupts for all VFs */
    cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
}

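/*
 * Point every engine selected in mcode->core_mask at the DMA address of the
 * microcode image by programming its CPTX_PF_ENGX_UCODE_BASE register.
 */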
static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
    int ret = 0, core = 0, shift = 0;
    u32 total_cores = 0;
    struct device *dev = &cpt->pdev->dev;

    if (!mcode || !mcode->code) {
        dev_err(dev, "Either mcode is NULL or its data is NULL\n");
        return -EINVAL;
    }

    if (mcode->code_size == 0) {
        dev_err(dev, "Microcode size is 0\n");
        return -EINVAL;
    }

    /* Assumes 0-9 are SE cores for UCODE_BASE registers and
     * AE core bases follow
     */
    if (mcode->is_ae) {
        core = CPT_MAX_SE_CORES; /* start counting from 10 */
        total_cores = CPT_MAX_TOTAL_CORES; /* up to 15 */
    } else {
        core = 0; /* start counting from 0 */
        total_cores = CPT_MAX_SE_CORES; /* up to 9 */
    }

    /* Point to microcode for each core of the group */
    for (; core < total_cores; core++, shift++) {
        if (mcode->core_mask & (1 << shift)) {
            cpt_write_csr64(cpt->reg_base,
                    CPTX_PF_ENGX_UCODE_BASE(0, core),
                    (u64)mcode->phys_base);
        }
    }
    return ret;
}

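/*
 * Bring up one microcode group: mark the device not ready, quiesce PF
 * interrupts, claim the next free core group, disable the targeted cores,
 * program their UCODE_BASE registers, bind the cores to the group and
 * re-enable them. Mailbox interrupts are re-enabled on both the success and
 * the failure path.
 */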
static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
{
    int ret = 0;
    struct device *dev = &cpt->pdev->dev;

    /* Make device not ready */
    cpt->flags &= ~CPT_FLAG_DEVICE_READY;
    /* Disable all PF interrupts */
    cpt_disable_all_interrupts(cpt);
    /* Calculate mcode group and coremasks */
    if (mcode->is_ae) {
        if (mcode->num_cores > cpt->max_ae_cores) {
            dev_err(dev, "Requested more cores than available AE cores\n");
            ret = -EINVAL;
            goto cpt_init_fail;
        }

        if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
            dev_err(dev, "Can't load, all eight microcode groups in use\n");
            return -ENFILE;
        }

        mcode->group = cpt->next_group;
        /* Convert requested cores to mask */
        mcode->core_mask = GENMASK(mcode->num_cores - 1, 0);
        cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
                  mcode->group);
        /* Load microcode for AE engines */
        ret = cpt_load_microcode(cpt, mcode);
        if (ret) {
            dev_err(dev, "Microcode load failed for %s\n",
                mcode->version);
            goto cpt_init_fail;
        }
        cpt->next_group++;
        /* Configure group mask for the mcode */
        cpt_configure_group(cpt, mcode->group, mcode->core_mask,
                    AE_TYPES);
        /* Enable AE cores for the group mask */
        cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
    } else {
        if (mcode->num_cores > cpt->max_se_cores) {
            dev_err(dev, "Requested more cores than available SE cores\n");
            ret = -EINVAL;
            goto cpt_init_fail;
        }
        if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
            dev_err(dev, "Can't load, all eight microcode groups in use\n");
            return -ENFILE;
        }

        mcode->group = cpt->next_group;
        /* Convert requested cores to mask */
        mcode->core_mask = GENMASK(mcode->num_cores - 1, 0);
        cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
                  mcode->group);
        /* Load microcode for SE engines */
        ret = cpt_load_microcode(cpt, mcode);
        if (ret) {
            dev_err(dev, "Microcode load failed for %s\n",
                mcode->version);
            goto cpt_init_fail;
        }
        cpt->next_group++;
        /* Configure group mask for the mcode */
        cpt_configure_group(cpt, mcode->group, mcode->core_mask,
                    SE_TYPES);
        /* Enable SE cores for the group mask */
        cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
    }

    /* Enable PF mailbox interrupts */
    cpt_enable_mbox_interrupts(cpt);
    cpt->flags |= CPT_FLAG_DEVICE_READY;

    return ret;

cpt_init_fail:
    /* Enable PF mailbox interrupts */
    cpt_enable_mbox_interrupts(cpt);

    return ret;
}

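/*
 * On-disk layout of a CPT microcode image: a fixed-size header followed by
 * the code itself. code_length is stored big-endian; the loader below
 * multiplies it by 2, i.e. it is expressed in 16-bit units. data_length and
 * sram_address are carried in the header but are not consumed in this file.
 */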
struct ucode_header {
    u8 version[CPT_UCODE_VERSION_SZ];
    __be32 code_length;
    u32 data_length;
    u64 sram_address;
};

static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
{
    const struct firmware *fw_entry;
    struct device *dev = &cpt->pdev->dev;
    struct ucode_header *ucode;
    struct microcode *mcode;
    int j, ret = 0;

    ret = request_firmware(&fw_entry, fw, dev);
    if (ret)
        return ret;

    ucode = (struct ucode_header *)fw_entry->data;
    mcode = &cpt->mcode[cpt->next_mc_idx];
    memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
    mcode->code_size = be32_to_cpu(ucode->code_length) * 2;
    if (!mcode->code_size ||
        fw_entry->size < sizeof(*ucode) + mcode->code_size) {
        ret = -EINVAL;
        goto fw_release;
    }

    mcode->is_ae = is_ae;
    mcode->core_mask = 0ULL;
    mcode->num_cores = is_ae ? 6 : 10;

    /* Allocate DMAable space */
    mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
                     &mcode->phys_base, GFP_KERNEL);
    if (!mcode->code) {
        dev_err(dev, "Unable to allocate space for microcode\n");
        ret = -ENOMEM;
        goto fw_release;
    }

    memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
           mcode->code_size);

    /* Byte swap 64-bit */
    for (j = 0; j < (mcode->code_size / 8); j++)
        ((__be64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]);
    /* MC needs 16-bit swap */
    for (j = 0; j < (mcode->code_size / 2); j++)
        ((__be16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]);

    dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size);
    dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae);
    dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores);
    dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code);
    dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base);

    ret = do_cpt_init(cpt, mcode);
    if (ret) {
        dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
        goto fw_release;
    }

    dev_info(dev, "Microcode loaded %s\n", mcode->version);
    mcode->is_mc_valid = 1;
    cpt->next_mc_idx++;

fw_release:
    release_firmware(fw_entry);

    return ret;
}

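/*
 * Load the AE image first and then the SE image. Both files are fetched with
 * request_firmware(), so they must be present in the standard firmware search
 * path (e.g. /lib/firmware) when the PF probes.
 */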
static int cpt_ucode_load(struct cpt_device *cpt)
{
    int ret = 0;
    struct device *dev = &cpt->pdev->dev;

    ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
    if (ret) {
        dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret);
        return ret;
    }
    ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
    if (ret) {
        dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret);
        return ret;
    }

    return ret;
}

static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq)
{
    struct cpt_device *cpt = (struct cpt_device *)cpt_irq;

    cpt_mbox_intr_handler(cpt, 0);

    return IRQ_HANDLED;
}

static void cpt_reset(struct cpt_device *cpt)
{
    cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
}

static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
{
    union cptx_pf_constants pf_cnsts = {0};

    pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
    cpt->max_se_cores = pf_cnsts.s.se;
    cpt->max_ae_cores = pf_cnsts.s.ae;
}

static u32 cpt_check_bist_status(struct cpt_device *cpt)
{
    union cptx_pf_bist_status bist_sts = {0};

    bist_sts.u = cpt_read_csr64(cpt->reg_base,
                    CPTX_PF_BIST_STATUS(0));

    return bist_sts.u;
}

static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
{
    union cptx_pf_exe_bist_status bist_sts = {0};

    bist_sts.u = cpt_read_csr64(cpt->reg_base,
                    CPTX_PF_EXE_BIST_STATUS(0));

    return bist_sts.u;
}

static void cpt_disable_all_cores(struct cpt_device *cpt)
{
    u32 grp, timeout = 100;
    struct device *dev = &cpt->pdev->dev;

    /* Disengage the cores from groups */
    for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
        udelay(CSR_DELAY);
    }

    grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
    while (grp) {
        dev_err(dev, "Cores still busy\n");
        grp = cpt_read_csr64(cpt->reg_base,
                     CPTX_PF_EXEC_BUSY(0));
        if (!timeout--)
            break;

        udelay(CSR_DELAY);
    }
    /* Disable the cores */
    cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}

/*
 * Ensure all cores are disengaged from all groups by
 * calling cpt_disable_all_cores() before calling this
 * function.
 */
static void cpt_unload_microcode(struct cpt_device *cpt)
{
    u32 grp = 0, core;

    /* Free microcode bases and reset group masks */
    for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
        struct microcode *mcode = &cpt->mcode[grp];

        if (cpt->mcode[grp].code)
            dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
                      mcode->code, mcode->phys_base);
        mcode->code = NULL;
    }
    /* Clear UCODE_BASE registers for all engines */
    for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
        cpt_write_csr64(cpt->reg_base,
                CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}

static int cpt_device_init(struct cpt_device *cpt)
{
    u64 bist;
    struct device *dev = &cpt->pdev->dev;

    /* Reset the PF when first probed */
    cpt_reset(cpt);
    msleep(100);

    /* Check BIST status */
    bist = (u64)cpt_check_bist_status(cpt);
    if (bist) {
        dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
        return -ENODEV;
    }

    bist = cpt_check_exe_bist_status(cpt);
    if (bist) {
        dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
        return -ENODEV;
    }

    /* Get max enabled cores */
    cpt_find_max_enabled_cores(cpt);
    /* Disable all cores */
    cpt_disable_all_cores(cpt);
    /* Reset device parameters */
    cpt->next_mc_idx = 0;
    cpt->next_group = 0;
    /* PF is ready */
    cpt->flags |= CPT_FLAG_DEVICE_READY;

    return 0;
}

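/*
 * Allocate all CPT_PF_MSIX_VECTORS MSI-X vectors up front; only the VF-to-PF
 * mailbox vector is actually wired up to a handler here.
 */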
static int cpt_register_interrupts(struct cpt_device *cpt)
{
    int ret;
    struct device *dev = &cpt->pdev->dev;

    /* Enable MSI-X */
    ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
            CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
    if (ret < 0) {
        dev_err(dev, "Request for #%d msix vectors failed\n",
            CPT_PF_MSIX_VECTORS);
        return ret;
    }

    /* Register mailbox interrupt handler */
    ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
              cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
    if (ret)
        goto fail;

    /* Enable mailbox interrupt */
    cpt_enable_mbox_interrupts(cpt);
    return 0;

fail:
    dev_err(dev, "Request irq failed\n");
    pci_free_irq_vectors(cpt->pdev);
    return ret;
}

static void cpt_unregister_interrupts(struct cpt_device *cpt)
{
    free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
    pci_free_irq_vectors(cpt->pdev);
}

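/*
 * Enable SR-IOV for the requested number of VFs, capped at the TotalVFs
 * value advertised in the PF's SR-IOV capability.
 */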
static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
    int pos = 0;
    int err;
    u16 total_vf_cnt;
    struct pci_dev *pdev = cpt->pdev;

    pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
    if (!pos) {
        dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
        return -ENODEV;
    }

    cpt->num_vf_en = num_vfs; /* User requested VFs */
    pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
    if (total_vf_cnt < cpt->num_vf_en)
        cpt->num_vf_en = total_vf_cnt;

    if (!total_vf_cnt)
        return 0;

    /* Enable the available VFs */
    err = pci_enable_sriov(pdev, cpt->num_vf_en);
    if (err) {
        dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
            cpt->num_vf_en);
        cpt->num_vf_en = 0;
        return err;
    }

    /* TODO: Optionally enable static VQ priorities feature */

    dev_info(&pdev->dev, "SRIOV enabled, number of VFs available %d\n",
         cpt->num_vf_en);

    cpt->flags |= CPT_FLAG_SRIOV_ENABLED;

    return 0;
}

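/*
 * PF probe: enable the PCI device, map BAR0, initialize the CPT block, hook
 * the mailbox interrupt, load the AE and SE microcode, and enable SR-IOV.
 */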
static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct device *dev = &pdev->dev;
    struct cpt_device *cpt;
    int err;

    if (num_vfs > 16 || num_vfs < 4) {
        dev_warn(dev, "Invalid VF count %d, resetting it to 4 (default)\n",
             num_vfs);
        num_vfs = 4;
    }

    cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
    if (!cpt)
        return -ENOMEM;

    pci_set_drvdata(pdev, cpt);
    cpt->pdev = pdev;
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(dev, "Failed to enable PCI device\n");
        pci_set_drvdata(pdev, NULL);
        return err;
    }

    err = pci_request_regions(pdev, DRV_NAME);
    if (err) {
        dev_err(dev, "PCI request regions failed 0x%x\n", err);
        goto cpt_err_disable_device;
    }

    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
    if (err) {
        dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
        goto cpt_err_release_regions;
    }

    /* Map PF's configuration registers */
    cpt->reg_base = pcim_iomap(pdev, 0, 0);
    if (!cpt->reg_base) {
        dev_err(dev, "Cannot map config register space, aborting\n");
        err = -ENOMEM;
        goto cpt_err_release_regions;
    }

    /* CPT device HW initialization */
    err = cpt_device_init(cpt);
    if (err)
        goto cpt_err_release_regions;

    /* Register interrupts */
    err = cpt_register_interrupts(cpt);
    if (err)
        goto cpt_err_release_regions;

    err = cpt_ucode_load(cpt);
    if (err)
        goto cpt_err_unregister_interrupts;

    /* Configure SRIOV */
    err = cpt_sriov_init(cpt, num_vfs);
    if (err)
        goto cpt_err_unregister_interrupts;

    return 0;

cpt_err_unregister_interrupts:
    cpt_unregister_interrupts(cpt);
cpt_err_release_regions:
    pci_release_regions(pdev);
cpt_err_disable_device:
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
    return err;
}

static void cpt_remove(struct pci_dev *pdev)
{
    struct cpt_device *cpt = pci_get_drvdata(pdev);

    /* Disengage SE and AE cores from all groups */
    cpt_disable_all_cores(cpt);
    /* Unload microcode */
    cpt_unload_microcode(cpt);
    cpt_unregister_interrupts(cpt);
    pci_disable_sriov(pdev);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
}

static void cpt_shutdown(struct pci_dev *pdev)
{
    struct cpt_device *cpt = pci_get_drvdata(pdev);

    if (!cpt)
        return;

    dev_info(&pdev->dev, "Shutdown device %x:%x.\n",
         (u32)pdev->vendor, (u32)pdev->device);

    cpt_unregister_interrupts(cpt);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id cpt_id_table[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) },
    { 0, }  /* end of table */
};

static struct pci_driver cpt_pci_driver = {
    .name = DRV_NAME,
    .id_table = cpt_id_table,
    .probe = cpt_probe,
    .remove = cpt_remove,
    .shutdown = cpt_shutdown,
};

module_pci_driver(cpt_pci_driver);

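/*
 * Declare the firmware files requested in cpt_ucode_load() so that userspace
 * tooling (e.g. initramfs generators) can bundle them with the module.
 */
MODULE_FIRMWARE("cpt8x-mc-ae.out");
MODULE_FIRMWARE("cpt8x-mc-se.out");
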
MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cpt_id_table);