// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Virtual Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define DRV_NAME    "rvu_nicvf"
#define DRV_STRING  "Marvell RVU NIC Virtual Function Driver"

static const struct pci_device_id otx2_vf_id_table[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
    { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
    { }
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);

/* RVU VF Interrupt Vector Enumeration */
enum {
    RVU_VF_INT_VEC_MBOX = 0x0,
};

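/* Process a single response received from the PF/AF over the VF-PF
 * mailbox. The message ID, signature and return code are sanity
 * checked before dispatch to the per-message handlers.
 */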
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
                     struct mbox_msghdr *msg)
{
    if (msg->id >= MBOX_MSG_MAX) {
        dev_err(vf->dev,
            "Mbox msg with unknown ID %d\n", msg->id);
        return;
    }

    if (msg->sig != OTX2_MBOX_RSP_SIG) {
        dev_err(vf->dev,
            "Mbox msg with wrong signature %x, ID %d\n",
            msg->sig, msg->id);
        return;
    }

    if (msg->rc == MBOX_MSG_INVALID) {
        dev_err(vf->dev,
            "PF/AF says the sent msg(s) %d were invalid\n",
            msg->id);
        return;
    }

    switch (msg->id) {
    case MBOX_MSG_READY:
        vf->pcifunc = msg->pcifunc;
        break;
    case MBOX_MSG_MSIX_OFFSET:
        mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
        break;
    case MBOX_MSG_NPA_LF_ALLOC:
        mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
        break;
    case MBOX_MSG_NIX_LF_ALLOC:
        mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
        break;
    case MBOX_MSG_NIX_TXSCH_ALLOC:
        mbox_handler_nix_txsch_alloc(vf,
                         (struct nix_txsch_alloc_rsp *)msg);
        break;
    case MBOX_MSG_NIX_BP_ENABLE:
        mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
        break;
    default:
        if (msg->rc)
            dev_err(vf->dev,
                "Mbox msg response has err %d, ID %d\n",
                msg->rc, msg->id);
    }
}

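/* Mailbox work handler: drains the PF => VF response messages that the
 * interrupt handler queued up, and resets the mailbox region once the
 * last message has been acked.
 */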
static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
    struct otx2_mbox_dev *mdev;
    struct mbox_hdr *rsp_hdr;
    struct mbox_msghdr *msg;
    struct otx2_mbox *mbox;
    struct mbox *af_mbox;
    int offset, id;

    af_mbox = container_of(work, struct mbox, mbox_wrk);
    mbox = &af_mbox->mbox;
    mdev = &mbox->dev[0];
    rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    if (af_mbox->num_msgs == 0)
        return;
    offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

    for (id = 0; id < af_mbox->num_msgs; id++) {
        msg = (struct mbox_msghdr *)(mdev->mbase + offset);
        otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
        offset = mbox->rx_start + msg->next_msgoff;
        if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
            __otx2_mbox_reset(mbox, 0);
        mdev->msgs_acked++;
    }
}

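/* Handle a single notification ("up") message from the PF. Only CGX
 * link events are expected; any other message is answered with an
 * invalid-message reply.
 */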
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
                      struct mbox_msghdr *req)
{
    struct msg_rsp *rsp;
    int err;

    /* Check if valid; if not, reply with an invalid msg */
    if (req->sig != OTX2_MBOX_REQ_SIG) {
        otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
        return -ENODEV;
    }

    switch (req->id) {
    case MBOX_MSG_CGX_LINK_EVENT:
        rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
                        &vf->mbox.mbox_up, 0,
                        sizeof(struct msg_rsp));
        if (!rsp)
            return -ENOMEM;

        rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
        rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
        rsp->hdr.pcifunc = 0;
        rsp->hdr.rc = 0;
        err = otx2_mbox_up_handler_cgx_link_event(
                vf, (struct cgx_link_info_msg *)req, rsp);
        return err;
    default:
        otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
        return -ENODEV;
    }
    return 0;
}

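/* Work handler for PF => VF notifications: processes each queued
 * message and then sends the prepared responses back to the PF.
 */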
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
    struct otx2_mbox_dev *mdev;
    struct mbox_hdr *rsp_hdr;
    struct mbox_msghdr *msg;
    struct otx2_mbox *mbox;
    struct mbox *vf_mbox;
    struct otx2_nic *vf;
    int offset, id;

    vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
    vf = vf_mbox->pfvf;
    mbox = &vf_mbox->mbox_up;
    mdev = &mbox->dev[0];

    rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    if (vf_mbox->up_num_msgs == 0)
        return;

    offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

    for (id = 0; id < vf_mbox->up_num_msgs; id++) {
        msg = (struct mbox_msghdr *)(mdev->mbase + offset);
        otx2vf_process_mbox_msg_up(vf, msg);
        offset = mbox->rx_start + msg->next_msgoff;
    }

    otx2_mbox_msg_send(mbox, 0);
}

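/* Mailbox interrupt handler: clears the IRQ, then checks both the
 * response and notification mailboxes and defers any pending messages
 * to the mailbox workqueue.
 */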
static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
    struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
    struct otx2_mbox_dev *mdev;
    struct otx2_mbox *mbox;
    struct mbox_hdr *hdr;

    /* Clear the IRQ */
    otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

    /* Read latest mbox data */
    smp_rmb();

    /* Check for PF => VF response messages */
    mbox = &vf->mbox.mbox;
    mdev = &mbox->dev[0];
    otx2_sync_mbox_bbuf(mbox, 0);

    trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));

    hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    if (hdr->num_msgs) {
        vf->mbox.num_msgs = hdr->num_msgs;
        hdr->num_msgs = 0;
        memset(mbox->hwbase + mbox->rx_start, 0,
               ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
        queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
    }
    /* Check for PF => VF notification messages */
    mbox = &vf->mbox.mbox_up;
    mdev = &mbox->dev[0];
    otx2_sync_mbox_bbuf(mbox, 0);

    hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    if (hdr->num_msgs) {
        vf->mbox.up_num_msgs = hdr->num_msgs;
        hdr->num_msgs = 0;
        memset(mbox->hwbase + mbox->rx_start, 0,
               ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
        queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
    }

    return IRQ_HANDLED;
}

static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
    int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

    /* Disable VF => PF mailbox IRQ */
    otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
    free_irq(vector, vf);
}

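/* Register and enable the mailbox interrupt. When @probe_pf is true,
 * also send a READY message to verify the PF/AF is responding, and
 * defer probe if it is not.
 */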
static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
    struct otx2_hw *hw = &vf->hw;
    struct msg_req *req;
    char *irq_name;
    int err;

    /* Register mailbox interrupt handler */
    irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
    snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
    err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
              otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
    if (err) {
        dev_err(vf->dev,
            "RVUPF: IRQ registration failed for VFAF mbox irq\n");
        return err;
    }

    /* Enable mailbox interrupt for msgs coming from PF.
     * First clear to avoid spurious interrupts, if any.
     */
    otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
    otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

    if (!probe_pf)
        return 0;

    /* Check mailbox communication with PF */
    req = otx2_mbox_alloc_msg_ready(&vf->mbox);
    if (!req) {
        otx2vf_disable_mbox_intr(vf);
        return -ENOMEM;
    }

    err = otx2_sync_mbox_msg(&vf->mbox);
    if (err) {
        dev_warn(vf->dev,
             "AF not responding to mailbox, deferring probe\n");
        otx2vf_disable_mbox_intr(vf);
        return -EPROBE_DEFER;
    }
    return 0;
}

static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
    struct mbox *mbox = &vf->mbox;

    if (vf->mbox_wq) {
        destroy_workqueue(vf->mbox_wq);
        vf->mbox_wq = NULL;
    }

    if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
        iounmap((void __iomem *)mbox->mbox.hwbase);

    otx2_mbox_destroy(&mbox->mbox);
    otx2_mbox_destroy(&mbox->mbox_up);
}

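/* Set up the VF <=> PF mailbox: allocate the workqueue, map the
 * mailbox region (BAR2 registers on cn10k, a shared RAM region
 * otherwise) and init both mailbox directions.
 */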
static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
    struct mbox *mbox = &vf->mbox;
    void __iomem *hwbase;
    int err;

    mbox->pfvf = vf;
    vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
                      WQ_UNBOUND | WQ_HIGHPRI |
                      WQ_MEM_RECLAIM, 1);
    if (!vf->mbox_wq)
        return -ENOMEM;

    if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
        /* For cn10k platform, VF mailbox region is in its BAR2
         * register space
         */
        hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
    } else {
        /* Mailbox is a reserved memory (in RAM) region shared between
         * admin function (i.e. PF0) and this VF, shouldn't be mapped as
         * device memory to allow unaligned accesses.
         */
        hwbase = ioremap_wc(pci_resource_start(vf->pdev,
                               PCI_MBOX_BAR_NUM),
                    pci_resource_len(vf->pdev,
                             PCI_MBOX_BAR_NUM));
        if (!hwbase) {
            dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
            err = -ENOMEM;
            goto exit;
        }
    }

    err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
                 MBOX_DIR_VFPF, 1);
    if (err)
        goto exit;

    err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
                 MBOX_DIR_VFPF_UP, 1);
    if (err)
        goto exit;

    err = otx2_mbox_bbuf_init(mbox, vf->pdev);
    if (err)
        goto exit;

    INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
    INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
    mutex_init(&mbox->lock);

    return 0;
exit:
    if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
        iounmap(hwbase);
    destroy_workqueue(vf->mbox_wq);
    return err;
}

static int otx2vf_open(struct net_device *netdev)
{
    struct otx2_nic *vf;
    int err;

    err = otx2_open(netdev);
    if (err)
        return err;

    /* LBKs do not receive link events, so tell everyone we are up here */
    vf = netdev_priv(netdev);
    if (is_otx2_lbkvf(vf->pdev)) {
        pr_info("%s NIC Link is UP\n", netdev->name);
        netif_carrier_on(netdev);
        netif_tx_start_all_queues(netdev);
    }

    return 0;
}

static int otx2vf_stop(struct net_device *netdev)
{
    return otx2_stop(netdev);
}

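/* Transmit entry point: append the skb to the send queue picked by its
 * queue mapping. If the SQ is full, stop the queue and re-check after
 * a barrier in case completions freed up SQBs meanwhile.
 */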
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
    struct otx2_nic *vf = netdev_priv(netdev);
    int qidx = skb_get_queue_mapping(skb);
    struct otx2_snd_queue *sq;
    struct netdev_queue *txq;

    sq = &vf->qset.sq[qidx];
    txq = netdev_get_tx_queue(netdev, qidx);

    if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
        netif_tx_stop_queue(txq);

        /* Check again, in case SQBs got freed up */
        smp_mb();
        if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
                            > sq->sqe_thresh)
            netif_tx_wake_queue(txq);

        return NETDEV_TX_BUSY;
    }

    return NETDEV_TX_OK;
}

static void otx2vf_set_rx_mode(struct net_device *netdev)
{
    struct otx2_nic *vf = netdev_priv(netdev);

    queue_work(vf->otx2_wq, &vf->rx_mode_work);
}

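/* Deferred rx-mode work: translates the netdev flags (promisc,
 * multicast) into a nix_set_rx_mode mailbox request sent to the PF.
 */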
static void otx2vf_do_set_rx_mode(struct work_struct *work)
{
    struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
    struct net_device *netdev = vf->netdev;
    unsigned int flags = netdev->flags;
    struct nix_rx_mode *req;

    mutex_lock(&vf->mbox.lock);

    req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
    if (!req) {
        mutex_unlock(&vf->mbox.lock);
        return;
    }

    req->mode = NIX_RX_MODE_UCAST;

    if (flags & IFF_PROMISC)
        req->mode |= NIX_RX_MODE_PROMISC;
    if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
        req->mode |= NIX_RX_MODE_ALLMULTI;

    req->mode |= NIX_RX_MODE_USE_MCE;

    otx2_sync_mbox_msg(&vf->mbox);

    mutex_unlock(&vf->mbox.lock);
}

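/* When the interface is running, bring it down and back up around the
 * MTU update so the new value takes effect.
 */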
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
    bool if_up = netif_running(netdev);
    int err = 0;

    if (if_up)
        otx2vf_stop(netdev);

    netdev_info(netdev, "Changing MTU from %d to %d\n",
            netdev->mtu, new_mtu);
    netdev->mtu = new_mtu;

    if (if_up)
        err = otx2vf_open(netdev);

    return err;
}

static void otx2vf_reset_task(struct work_struct *work)
{
    struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

    rtnl_lock();

    if (netif_running(vf->netdev)) {
        otx2vf_stop(vf->netdev);
        vf->reset_count++;
        otx2vf_open(vf->netdev);
    }

    rtnl_unlock();
}

static int otx2vf_set_features(struct net_device *netdev,
                   netdev_features_t features)
{
    return otx2_handle_ntuple_tc_features(netdev, features);
}

static const struct net_device_ops otx2vf_netdev_ops = {
    .ndo_open = otx2vf_open,
    .ndo_stop = otx2vf_stop,
    .ndo_start_xmit = otx2vf_xmit,
    .ndo_set_rx_mode = otx2vf_set_rx_mode,
    .ndo_set_mac_address = otx2_set_mac_address,
    .ndo_change_mtu = otx2vf_change_mtu,
    .ndo_set_features = otx2vf_set_features,
    .ndo_get_stats64 = otx2_get_stats64,
    .ndo_tx_timeout = otx2_tx_timeout,
    .ndo_eth_ioctl  = otx2_ioctl,
    .ndo_setup_tc = otx2_setup_tc,
};

static int otx2_wq_init(struct otx2_nic *vf)
{
    vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
    if (!vf->otx2_wq)
        return -ENOMEM;

    INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
    INIT_WORK(&vf->reset_task, otx2vf_reset_task);
    return 0;
}

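/* Reallocate MSI-X vectors once the NIX MSI-X offset is known: tear
 * down the mailbox IRQ, resize the vector allocation to cover the
 * completion interrupts, then re-register the mailbox IRQ.
 */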
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
    struct otx2_hw *hw = &vf->hw;
    int num_vec, err;

    num_vec = hw->nix_msixoff;
    num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

    otx2vf_disable_mbox_intr(vf);
    pci_free_irq_vectors(hw->pdev);
    err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
    if (err < 0) {
        dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
            __func__, num_vec);
        return err;
    }

    return otx2vf_register_mbox_intr(vf, false);
}

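/* Probe: enable the PCI device, map CSRs, bring up the VF <=> PF
 * mailbox, attach NPA/NIX LFs through the AF and register the netdev.
 */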
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    int num_vec = pci_msix_vec_count(pdev);
    struct device *dev = &pdev->dev;
    struct net_device *netdev;
    struct otx2_nic *vf;
    struct otx2_hw *hw;
    int err, qcount;

    err = pcim_enable_device(pdev);
    if (err) {
        dev_err(dev, "Failed to enable PCI device\n");
        return err;
    }

    err = pci_request_regions(pdev, DRV_NAME);
    if (err) {
        dev_err(dev, "PCI request regions failed 0x%x\n", err);
        return err;
    }

    err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
    if (err) {
        dev_err(dev, "DMA mask config failed, abort\n");
        goto err_release_regions;
    }

    pci_set_master(pdev);

    qcount = num_online_cpus();
    netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
    if (!netdev) {
        err = -ENOMEM;
        goto err_release_regions;
    }

    pci_set_drvdata(pdev, netdev);
    SET_NETDEV_DEV(netdev, &pdev->dev);
    vf = netdev_priv(netdev);
    vf->netdev = netdev;
    vf->pdev = pdev;
    vf->dev = dev;
    vf->iommu_domain = iommu_get_domain_for_dev(dev);

    vf->flags |= OTX2_FLAG_INTF_DOWN;
    hw = &vf->hw;
    hw->pdev = vf->pdev;
    hw->rx_queues = qcount;
    hw->tx_queues = qcount;
    hw->max_queues = qcount;
    hw->tot_tx_queues = qcount;
    hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
    /* Use 128-byte CQE descriptors by default */
    hw->xqe_size = 128;

    hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
                      GFP_KERNEL);
    if (!hw->irq_name) {
        err = -ENOMEM;
        goto err_free_netdev;
    }

    hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
                     sizeof(cpumask_var_t), GFP_KERNEL);
    if (!hw->affinity_mask) {
        err = -ENOMEM;
        goto err_free_netdev;
    }

    err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
    if (err < 0) {
        dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
            __func__, num_vec);
        goto err_free_netdev;
    }

    vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
    if (!vf->reg_base) {
        dev_err(dev, "Unable to map physical function CSRs, aborting\n");
        err = -ENOMEM;
        goto err_free_irq_vectors;
    }

    otx2_setup_dev_hw_settings(vf);
    /* Init VF <=> PF mailbox stuff */
    err = otx2vf_vfaf_mbox_init(vf);
    if (err)
        goto err_free_irq_vectors;

    /* Register mailbox interrupt */
    err = otx2vf_register_mbox_intr(vf, true);
    if (err)
        goto err_mbox_destroy;

    /* Request AF to attach NPA and NIX LFs to this VF */
    err = otx2_attach_npa_nix(vf);
    if (err)
        goto err_disable_mbox_intr;

    err = otx2vf_realloc_msix_vectors(vf);
    if (err)
        goto err_mbox_destroy;

    err = otx2_set_real_num_queues(netdev, qcount, qcount);
    if (err)
        goto err_detach_rsrc;

    err = cn10k_lmtst_init(vf);
    if (err)
        goto err_detach_rsrc;

    /* Don't check for error; proceed without PTP */
    otx2_ptp_init(vf);

    /* Assign default MAC address */
    otx2_get_mac_from_af(netdev);

    netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                  NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
                  NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                  NETIF_F_GSO_UDP_L4;
    netdev->features = netdev->hw_features;
    /* Support TSO on tag interface */
    netdev->vlan_features |= netdev->features;
    netdev->hw_features  |= NETIF_F_HW_VLAN_CTAG_TX |
                NETIF_F_HW_VLAN_STAG_TX;
    netdev->features |= netdev->hw_features;

    netdev->hw_features |= NETIF_F_NTUPLE;
    netdev->hw_features |= NETIF_F_RXALL;
    netdev->hw_features |= NETIF_F_HW_TC;

    netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
    netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

    netdev->netdev_ops = &otx2vf_netdev_ops;

    netdev->min_mtu = OTX2_MIN_MTU;
    netdev->max_mtu = otx2_get_max_mtu(vf);

    /* For LBK VFs, set the netdev name explicitly so they can be told apart */
    if (is_otx2_lbkvf(vf->pdev)) {
        int n;

        n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
        /* Need to subtract 1 to get the proper VF number */
        n -= 1;
        snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
    }

    err = register_netdev(netdev);
    if (err) {
        dev_err(dev, "Failed to register netdevice\n");
        goto err_ptp_destroy;
    }

    err = otx2_wq_init(vf);
    if (err)
        goto err_unreg_netdev;

    otx2vf_set_ethtool_ops(netdev);

    err = otx2vf_mcam_flow_init(vf);
    if (err)
        goto err_unreg_netdev;

    err = otx2_init_tc(vf);
    if (err)
        goto err_unreg_netdev;

    err = otx2_register_dl(vf);
    if (err)
        goto err_shutdown_tc;

#ifdef CONFIG_DCB
    err = otx2_dcbnl_set_ops(netdev);
    if (err)
        goto err_shutdown_tc;
#endif

    return 0;

err_shutdown_tc:
    otx2_shutdown_tc(vf);
err_unreg_netdev:
    unregister_netdev(netdev);
err_ptp_destroy:
    otx2_ptp_destroy(vf);
err_detach_rsrc:
    if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
        qmem_free(vf->dev, vf->dync_lmt);
    otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
    otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
    otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
    pci_free_irq_vectors(hw->pdev);
err_free_netdev:
    pci_set_drvdata(pdev, NULL);
    free_netdev(netdev);
err_release_regions:
    pci_release_regions(pdev);
    return err;
}

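/* Remove/shutdown: disable pause/PFC config, then tear everything down
 * in roughly the reverse order of probe.
 */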
static void otx2vf_remove(struct pci_dev *pdev)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct otx2_nic *vf;

    if (!netdev)
        return;

    vf = netdev_priv(netdev);

    /* Disable 802.3x pause frames */
    if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
        (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
        vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
        vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
        otx2_config_pause_frm(vf);
    }

#ifdef CONFIG_DCB
    /* Disable PFC config */
    if (vf->pfc_en) {
        vf->pfc_en = 0;
        otx2_config_priority_flow_ctrl(vf);
    }
#endif

    cancel_work_sync(&vf->reset_task);
    otx2_unregister_dl(vf);
    unregister_netdev(netdev);
    if (vf->otx2_wq)
        destroy_workqueue(vf->otx2_wq);
    otx2_ptp_destroy(vf);
    otx2vf_disable_mbox_intr(vf);
    otx2_detach_resources(&vf->mbox);
    if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
        qmem_free(vf->dev, vf->dync_lmt);
    otx2vf_vfaf_mbox_destroy(vf);
    pci_free_irq_vectors(vf->pdev);
    pci_set_drvdata(pdev, NULL);
    free_netdev(netdev);

    pci_release_regions(pdev);
}

static struct pci_driver otx2vf_driver = {
    .name = DRV_NAME,
    .id_table = otx2_vf_id_table,
    .probe = otx2vf_probe,
    .remove = otx2vf_remove,
    .shutdown = otx2vf_remove,
};

static int __init otx2vf_init_module(void)
{
    pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

    return pci_register_driver(&otx2vf_driver);
}

static void __exit otx2vf_cleanup_module(void)
{
    pci_unregister_driver(&otx2vf_driver);
}

module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);