// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
    struct ifcvf_hw *vf = arg;

    if (vf->config_cb.callback)
        return vf->config_cb.callback(vf->config_cb.private);

    return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
    struct vring_info *vring = arg;

    if (vring->cb.callback)
        return vring->cb.callback(vring->cb.private);

    return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
    struct ifcvf_hw *vf = arg;
    struct vring_info *vring;
    int i;

    for (i = 0; i < vf->nr_vring; i++) {
        vring = &vf->vring[i];
        if (vring->cb.callback)
            vring->cb.callback(vring->cb.private);
    }

    return IRQ_HANDLED;
}

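/* Combined handler for the fully shared case: a single MSI-X vector
 * services both the config interrupt and every virtqueue. The ISR
 * status byte tells us whether a config change is pending; the vq
 * callbacks are then dispatched via the reused-vector handler above.
 */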
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
    struct ifcvf_hw *vf = arg;
    u8 isr;

    isr = vp_ioread8(vf->isr);
    if (isr & VIRTIO_PCI_ISR_CONFIG)
        ifcvf_config_changed(irq, arg);

    return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
    pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;
    int i;

    for (i = 0; i < vf->nr_vring; i++) {
        if (vf->vring[i].irq != -EINVAL) {
            devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
            vf->vring[i].irq = -EINVAL;
        }
    }
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;

    if (vf->vqs_reused_irq != -EINVAL) {
        devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
        vf->vqs_reused_irq = -EINVAL;
    }
}

static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
{
    struct ifcvf_hw *vf = &adapter->vf;

    if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
        ifcvf_free_per_vq_irq(adapter);
    else
        ifcvf_free_vqs_reused_irq(adapter);
}

static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;

    if (vf->config_irq == -EINVAL)
        return;

    /* If the irq is shared by all vqs and the config interrupt,
     * it was already freed in ifcvf_free_vq_irq, so the config irq
     * only needs freeing when msix_vector_status != MSIX_VECTOR_DEV_SHARED.
     */
    if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
        devm_free_irq(&pdev->dev, vf->config_irq, vf);
        vf->config_irq = -EINVAL;
    }
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;

    ifcvf_free_vq_irq(adapter);
    ifcvf_free_config_irq(adapter);
    ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSI-X vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors on success,
 * or a negative value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;
    int max_intr, ret;

    /* all queues and config interrupt */
    max_intr = vf->nr_vring + 1;
    ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

    if (ret < 0) {
        IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
        return ret;
    }

    if (ret < max_intr)
        IFCVF_INFO(pdev,
               "Requested %u vectors, but only %u were allocated; performance may be lower\n",
               max_intr, ret);

    return ret;
}

static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;
    int i, vector, ret, irq;

    vf->vqs_reused_irq = -EINVAL;
    for (i = 0; i < vf->nr_vring; i++) {
        snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d", pci_name(pdev), i);
        vector = i;
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                       ifcvf_vq_intr_handler, 0,
                       vf->vring[i].msix_name,
                       &vf->vring[i]);
        if (ret) {
            IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
            goto err;
        }

        vf->vring[i].irq = irq;
        ret = ifcvf_set_vq_vector(vf, i, vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
            IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
            goto err;
        }
    }

    return 0;
err:
    ifcvf_free_irq(adapter);

    return -EFAULT;
}

static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;
    int i, vector, ret, irq;

    vector = 0;
    snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq", pci_name(pdev));
    irq = pci_irq_vector(pdev, vector);
    ret = devm_request_irq(&pdev->dev, irq,
                   ifcvf_vqs_reused_intr_handler, 0,
                   vf->vring[0].msix_name, vf);
    if (ret) {
        IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
        goto err;
    }

    vf->vqs_reused_irq = irq;
    for (i = 0; i < vf->nr_vring; i++) {
        vf->vring[i].irq = -EINVAL;
        ret = ifcvf_set_vq_vector(vf, i, vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
            IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
            goto err;
        }
    }

    return 0;
err:
    ifcvf_free_irq(adapter);

    return -EFAULT;
}

static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;
    int i, vector, ret, irq;

    vector = 0;
    snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq", pci_name(pdev));
    irq = pci_irq_vector(pdev, vector);
    ret = devm_request_irq(&pdev->dev, irq,
                   ifcvf_dev_intr_handler, 0,
                   vf->vring[0].msix_name, vf);
    if (ret) {
        IFCVF_ERR(pdev, "Failed to request irq for the device\n");
        goto err;
    }

    vf->vqs_reused_irq = irq;
    for (i = 0; i < vf->nr_vring; i++) {
        vf->vring[i].irq = -EINVAL;
        ret = ifcvf_set_vq_vector(vf, i, vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
            IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
            goto err;
        }
    }

    vf->config_irq = irq;
    ret = ifcvf_set_config_vector(vf, vector);
    if (ret == VIRTIO_MSI_NO_VECTOR) {
        IFCVF_ERR(pdev, "No msix vector for device config\n");
        goto err;
    }

    return 0;
err:
    ifcvf_free_irq(adapter);

    return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
{
    struct ifcvf_hw *vf = &adapter->vf;
    int ret;

    if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
        ret = ifcvf_request_per_vq_irq(adapter);
    else
        ret = ifcvf_request_vqs_reused_irq(adapter);

    return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    struct ifcvf_hw *vf = &adapter->vf;
    int config_vector, ret;

    if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
        config_vector = vf->nr_vring;
    else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
        /* vector 0 for vqs and 1 for config interrupt */
        config_vector = 1;
    else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
        /* re-use the vqs vector */
        return 0;
    else
        return -EINVAL;

    snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
         pci_name(pdev));
    vf->config_irq = pci_irq_vector(pdev, config_vector);
    ret = devm_request_irq(&pdev->dev, vf->config_irq,
                   ifcvf_config_changed, 0,
                   vf->config_msix_name, vf);
    if (ret) {
        IFCVF_ERR(pdev, "Failed to request config irq\n");
        goto err;
    }

    ret = ifcvf_set_config_vector(vf, config_vector);
    if (ret == VIRTIO_MSI_NO_VECTOR) {
        IFCVF_ERR(pdev, "No msix vector for device config\n");
        goto err;
    }

    return 0;
err:
    ifcvf_free_irq(adapter);

    return -EFAULT;
}

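/* Vector distribution policy: with nr_vring + 1 vectors available,
 * every vq and the config interrupt get a dedicated vector; with
 * fewer vectors, all vqs share vector 0 and the config interrupt
 * takes vector 1; with exactly one vector, everything shares vector 0.
 */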
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
    struct ifcvf_hw *vf = &adapter->vf;
    int nvectors, ret, max_intr;

    nvectors = ifcvf_alloc_vectors(adapter);
    if (nvectors <= 0)
        return -EFAULT;

    vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
    max_intr = vf->nr_vring + 1;
    if (nvectors < max_intr)
        vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

    if (nvectors == 1) {
        vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
        ret = ifcvf_request_dev_irq(adapter);

        return ret;
    }

    ret = ifcvf_request_vq_irq(adapter);
    if (ret)
        return ret;

    ret = ifcvf_request_config_irq(adapter);
    if (ret)
        return ret;

    return 0;
}

static int ifcvf_start_datapath(void *private)
{
    struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
    u8 status;
    int ret;

    ret = ifcvf_start_hw(vf);
    if (ret < 0) {
        status = ifcvf_get_status(vf);
        status |= VIRTIO_CONFIG_S_FAILED;
        ifcvf_set_status(vf, status);
    }

    return ret;
}

static int ifcvf_stop_datapath(void *private)
{
    struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
    int i;

    for (i = 0; i < vf->nr_vring; i++)
        vf->vring[i].cb.callback = NULL;

    ifcvf_stop_hw(vf);

    return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
    struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
    int i;

    for (i = 0; i < vf->nr_vring; i++) {
        vf->vring[i].last_avail_idx = 0;
        vf->vring[i].desc = 0;
        vf->vring[i].avail = 0;
        vf->vring[i].used = 0;
        vf->vring[i].ready = 0;
        vf->vring[i].cb.callback = NULL;
        vf->vring[i].cb.private = NULL;
    }

    ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
    return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

    return &adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
    struct pci_dev *pdev = adapter->pdev;
    u32 type = vf->dev_type;
    u64 features;

    if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK) {
        features = ifcvf_get_features(vf);
    } else {
        features = 0;
        IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
    }

    return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
    int ret;

    ret = ifcvf_verify_min_features(vf, features);
    if (ret)
        return ret;

    vf->req_features = features;

    return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return ifcvf_get_status(vf);
}

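/* On the transition to DRIVER_OK, irqs are requested and the datapath
 * is started; if either step fails, VIRTIO_CONFIG_S_FAILED is set so
 * the guest driver can observe the error.
 */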
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
    struct ifcvf_adapter *adapter;
    struct ifcvf_hw *vf;
    u8 status_old;
    int ret;

    vf = vdpa_to_vf(vdpa_dev);
    adapter = vdpa_to_adapter(vdpa_dev);
    status_old = ifcvf_get_status(vf);

    if (status_old == status)
        return;

    if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
        ret = ifcvf_request_irq(adapter);
        if (ret) {
            status = ifcvf_get_status(vf);
            status |= VIRTIO_CONFIG_S_FAILED;
            ifcvf_set_status(vf, status);
            return;
        }

        if (ifcvf_start_datapath(adapter) < 0)
            IFCVF_ERR(adapter->pdev,
                  "Failed to set ifcvf vdpa status %u\n",
                  status);
    }

    ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_adapter *adapter;
    struct ifcvf_hw *vf;
    u8 status_old;

    vf = vdpa_to_vf(vdpa_dev);
    adapter = vdpa_to_adapter(vdpa_dev);
    status_old = ifcvf_get_status(vf);

    if (status_old == 0)
        return 0;

    if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
        ifcvf_stop_datapath(adapter);
        ifcvf_free_irq(adapter);
    }

    ifcvf_reset_vring(adapter);

    return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
    return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                   struct vdpa_vq_state *state)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    state->split.avail_index = ifcvf_get_vq_state(vf, qid);
    return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                   const struct vdpa_vq_state *state)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                 struct vdpa_callback *cb)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                    u16 qid, bool ready)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                  u32 num)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                     u64 desc_area, u64 driver_area,
                     u64 device_area)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    vf->vring[qid].desc = desc_area;
    vf->vring[qid].avail = driver_area;
    vf->vring[qid].used = device_area;

    return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
    struct pci_dev *pdev = adapter->pdev;

    return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
    return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
    return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                  unsigned int offset,
                  void *buf, unsigned int len)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                  unsigned int offset, const void *buf,
                  unsigned int len)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                     struct vdpa_callback *cb)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    vf->config_cb.callback = cb->callback;
    vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                 u16 qid)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

    if (vf->vqs_reused_irq < 0)
        return vf->vring[qid].irq;
    else
        return -EINVAL;
}

static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                   u16 idx)
{
    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
    struct vdpa_notification_area area;

    area.addr = vf->vring[idx].notify_pa;
    if (!vf->notify_off_multiplier)
        area.size = PAGE_SIZE;
    else
        area.size = vf->notify_off_multiplier;

    return area;
}

/*
 * IFCVF currently has no on-chip IOMMU, so set_map()/dma_map()/
 * dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
    .get_device_features = ifcvf_vdpa_get_device_features,
    .set_driver_features = ifcvf_vdpa_set_driver_features,
    .get_driver_features = ifcvf_vdpa_get_driver_features,
    .get_status = ifcvf_vdpa_get_status,
    .set_status = ifcvf_vdpa_set_status,
    .reset      = ifcvf_vdpa_reset,
    .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
    .get_vq_state   = ifcvf_vdpa_get_vq_state,
    .set_vq_state   = ifcvf_vdpa_set_vq_state,
    .set_vq_cb  = ifcvf_vdpa_set_vq_cb,
    .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
    .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
    .set_vq_num = ifcvf_vdpa_set_vq_num,
    .set_vq_address = ifcvf_vdpa_set_vq_address,
    .get_vq_irq = ifcvf_vdpa_get_vq_irq,
    .kick_vq    = ifcvf_vdpa_kick_vq,
    .get_generation = ifcvf_vdpa_get_generation,
    .get_device_id  = ifcvf_vdpa_get_device_id,
    .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
    .get_vq_align   = ifcvf_vdpa_get_vq_align,
    .get_vq_group   = ifcvf_vdpa_get_vq_group,
    .get_config_size    = ifcvf_vdpa_get_config_size,
    .get_config = ifcvf_vdpa_get_config,
    .set_config = ifcvf_vdpa_set_config,
    .set_config_cb  = ifcvf_vdpa_set_config_cb,
    .get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
    {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
    {0},
};

static struct virtio_device_id id_table_blk[] = {
    {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
    {0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
    u32 dev_type;

    /* This driver drives both modern virtio devices and transitional
     * devices in modern mode.
     * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
     * legacy devices and transitional devices in legacy mode will
     * not work for vDPA; this driver does not drive devices with a
     * legacy interface.
     */

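    /* Per the virtio spec, a modern device uses PCI device ID
     * 0x1040 + virtio device ID (e.g. 0x1041 for net), while a
     * transitional device (ID below 0x1040) carries the virtio
     * device ID in its PCI subsystem device ID.
     */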
    if (pdev->device < 0x1040)
        dev_type = pdev->subsystem_device;
    else
        dev_type = pdev->device - 0x1040;

    return dev_type;
}

static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                  const struct vdpa_dev_set_config *config)
{
    struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
    struct ifcvf_adapter *adapter;
    struct vdpa_device *vdpa_dev;
    struct pci_dev *pdev;
    struct ifcvf_hw *vf;
    int ret;

    ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
    if (!ifcvf_mgmt_dev->adapter)
        return -EOPNOTSUPP;

    adapter = ifcvf_mgmt_dev->adapter;
    vf = &adapter->vf;
    pdev = adapter->pdev;
    vdpa_dev = &adapter->vdpa;

    if (name)
        ret = dev_set_name(&vdpa_dev->dev, "%s", name);
    else
        ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

    ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
    if (ret) {
        put_device(&adapter->vdpa.dev);
        IFCVF_ERR(pdev, "Failed to register to vDPA bus");
        return ret;
    }

    return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
    struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

    ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
    _vdpa_unregister_device(dev);
    ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
    .dev_add = ifcvf_vdpa_dev_add,
    .dev_del = ifcvf_vdpa_dev_del
};
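
/* A vDPA device is instantiated from userspace through these ops via
 * the vdpa bus, e.g. with the iproute2 vdpa tool (the bus address
 * below is illustrative):
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:01:00.1
 */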
0798 
0799 static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
0800 {
0801     struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
0802     struct device *dev = &pdev->dev;
0803     struct ifcvf_adapter *adapter;
0804     struct ifcvf_hw *vf;
0805     u32 dev_type;
0806     int ret, i;
0807 
0808     ret = pcim_enable_device(pdev);
0809     if (ret) {
0810         IFCVF_ERR(pdev, "Failed to enable device\n");
0811         return ret;
0812     }
0813     ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
0814                  IFCVF_DRIVER_NAME);
0815     if (ret) {
0816         IFCVF_ERR(pdev, "Failed to request MMIO region\n");
0817         return ret;
0818     }
0819 
0820     ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
0821     if (ret) {
0822         IFCVF_ERR(pdev, "No usable DMA configuration\n");
0823         return ret;
0824     }
0825 
0826     ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
0827     if (ret) {
0828         IFCVF_ERR(pdev,
0829               "Failed for adding devres for freeing irq vectors\n");
0830         return ret;
0831     }
0832 
0833     pci_set_master(pdev);

    adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                    dev, &ifc_vdpa_ops, 1, 1, NULL, false);
    if (IS_ERR(adapter)) {
        IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
        return PTR_ERR(adapter);
    }

    vf = &adapter->vf;
    vf->dev_type = get_dev_type(pdev);
    vf->base = pcim_iomap_table(pdev);

    adapter->pdev = pdev;
    adapter->vdpa.dma_dev = &pdev->dev;

    ret = ifcvf_init_hw(vf, pdev);
    if (ret) {
        IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
        return ret;
    }

    for (i = 0; i < vf->nr_vring; i++)
        vf->vring[i].irq = -EINVAL;

    vf->hw_features = ifcvf_get_hw_features(vf);
    vf->config_size = ifcvf_get_config_size(vf);

    ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
    if (!ifcvf_mgmt_dev) {
        IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
        return -ENOMEM;
    }

    ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
    ifcvf_mgmt_dev->mdev.device = dev;
    ifcvf_mgmt_dev->adapter = adapter;

    dev_type = get_dev_type(pdev);
    switch (dev_type) {
    case VIRTIO_ID_NET:
        ifcvf_mgmt_dev->mdev.id_table = id_table_net;
        break;
    case VIRTIO_ID_BLOCK:
        ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
        break;
    default:
        IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
        ret = -EOPNOTSUPP;
        goto err;
    }

    ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
    ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;

    adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;

    ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
    if (ret) {
        IFCVF_ERR(pdev,
              "Failed to initialize the management interfaces\n");
        goto err;
    }

    pci_set_drvdata(pdev, ifcvf_mgmt_dev);

    return 0;

err:
    kfree(ifcvf_mgmt_dev);
    return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
    struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

    ifcvf_mgmt_dev = pci_get_drvdata(pdev);
    vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
    kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
    /* N3000 network device */
    { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
             N3000_DEVICE_ID,
             PCI_VENDOR_ID_INTEL,
             N3000_SUBSYS_DEVICE_ID) },
    /* C5000X-PL network device */
    { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
             VIRTIO_TRANS_ID_NET,
             PCI_VENDOR_ID_INTEL,
             VIRTIO_ID_NET) },
    /* C5000X-PL block device */
    { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
             VIRTIO_TRANS_ID_BLOCK,
             PCI_VENDOR_ID_INTEL,
             VIRTIO_ID_BLOCK) },

    { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
    .name     = IFCVF_DRIVER_NAME,
    .id_table = ifcvf_pci_ids,
    .probe    = ifcvf_probe,
    .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");