// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping. 1 - Enable; 0 - Disable (default: 1)");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

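/*
 * vringh calls this when the simulator has placed buffers in the used
 * ring; bounce the event to the virtqueue callback the driver registered
 * via set_vq_cb(), if any.
 */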
static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

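/*
 * (Re)initialize the IOTLB-backed vringh for a queue, using the
 * descriptor/driver/device area addresses the driver programmed through
 * set_vq_address(). Called when the driver marks the queue ready.
 */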
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)(uintptr_t)vq->driver_addr,
			  (struct vring_used *)(uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

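/*
 * Bring the device back to its initial state: reset every virtqueue,
 * point all vrings back at address space 0, drop all IOTLB mappings,
 * clear the negotiated features and status, mark the device as running
 * again, and bump the config generation so drivers can detect the change.
 */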
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

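/*
 * Translate a DMA direction into IOTLB permissions from the device's
 * point of view: a DMA_FROM_DEVICE buffer is written by the device, so
 * it needs a write-only mapping, and vice versa for DMA_TO_DEVICE.
 */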
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

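/*
 * Allocate an IOVA range covering [paddr, paddr + size) and record the
 * translation in the IOTLB of address space 0, which backs the
 * simulator's DMA device. Returns DMA_MAPPING_ERROR on failure.
 */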
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
			  ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}

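/*
 * These dma_map_ops back vdpasim->vdpa.dma_dev, so DMA done by an
 * in-kernel virtio driver running on top of the simulator ends up as
 * entries in the address space 0 IOTLB instead of touching real hardware.
 */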
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->ngroups, dev_attr->nas,
				    dev_attr->name, false);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);
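
/*
 * Usage sketch (illustrative only, not part of this file): a device
 * simulator such as vdpa_sim_net or vdpa_sim_blk is expected to fill in
 * a struct vdpasim_dev_attr and call vdpasim_create() from its
 * management device's dev_add() callback, roughly:
 *
 *	struct vdpasim_dev_attr dev_attr = {
 *		.mgmt_dev    = mdev,
 *		.name        = name,
 *		.id          = VIRTIO_ID_NET,
 *		.ngroups     = 2,
 *		.nas         = 2,
 *		.nvqs        = 3,
 *		.config_size = sizeof(struct virtio_net_config),
 *		.work_fn     = my_work_fn,	// hypothetical work handler
 *	};
 *	struct vdpasim *simdev = vdpasim_create(&dev_attr);
 *
 *	if (IS_ERR(simdev))
 *		return PTR_ERR(simdev);
 *	return _vdpa_register_device(&simdev->vdpa, dev_attr.nvqs);
 *
 * Field names follow vdpa_sim.h; the exact values vary per device type.
 */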

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

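/*
 * The vring is (re)built only on the false -> true transition, so the
 * addresses programmed by the driver are picked up exactly once per
 * enable.
 */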
static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0, CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}


static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->running = false;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

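/*
 * Attach all virtqueues in @group to the IOTLB of address space @asid,
 * so that e.g. the control virtqueue can be isolated in its own address
 * space.
 */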
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	spin_lock(&vdpasim->lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	spin_unlock(&vdpasim->lock);

	return 0;
}

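/*
 * Batched mapping: throw away the current IOTLB for @asid and rebuild
 * it from the incoming @iotlb snapshot in one pass.
 */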
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

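/*
 * Incremental (non-batched) mapping interface: add or remove a single
 * range in the IOTLB of @asid.
 */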
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

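/*
 * Release everything vdpasim_create() set up; invoked by the vdpa core
 * when the device's reference count drops to zero.
 */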
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	/* ->free also runs for a partially initialized device (put_device()
	 * on the error path of vdpasim_create()), so check each allocation
	 * before touching it.
	 */
	if (vdpasim->vqs) {
		for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
			vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
			vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
		}
	}

	if (vdpa_get_dma_dev(vdpa)) {
		put_iova_domain(&vdpasim->iova);
		iova_cache_put();
	}

	kvfree(vdpasim->buffer);
	if (vdpasim->iommu) {
		/* Flush every address space: vhost_iotlb_free() on the array
		 * pointer would only reset the first entry and leak the rest.
		 */
		for (i = 0; i < vdpasim->dev_attr.nas; i++)
			vhost_iotlb_reset(&vdpasim->iommu[i]);
		kfree(vdpasim->iommu);
	}
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

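/*
 * Two flavours of config ops are provided: the default table exposes the
 * incremental dma_map()/dma_unmap() interface, while the batch table
 * exposes set_map() instead (selected via the batch_mapping module
 * parameter in vdpasim_create()).
 */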
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset                  = vdpasim_reset,
	.suspend                = vdpasim_suspend,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset                  = vdpasim_reset,
	.suspend                = vdpasim_suspend,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);