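// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 */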
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000

#define VIOMMU_REQUEST_VQ 0
#define VIOMMU_EVENT_VQ 1
#define VIOMMU_NR_VQS 2

struct viommu_dev {
	struct iommu_device iommu;
	struct device *dev;
	struct virtio_device *vdev;

	struct ida domain_ids;

	struct virtqueue *vqs[VIOMMU_NR_VQS];
	spinlock_t request_lock;
	struct list_head requests;
	void *evts;

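	/* Device configuration */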
	struct iommu_domain_geometry geometry;
	u64 pgsize_bitmap;
	u32 first_domain;
	u32 last_domain;
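	/* Supported MAP flags */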
	u32 map_flags;
	u32 probe_size;
};

struct viommu_mapping {
	phys_addr_t paddr;
	struct interval_tree_node iova;
	u32 flags;
};

struct viommu_domain {
	struct iommu_domain domain;
	struct viommu_dev *viommu;
	struct mutex mutex;
	unsigned int id;
	u32 map_flags;

	spinlock_t mappings_lock;
	struct rb_root_cached mappings;

	unsigned long nr_endpoints;
	bool bypass;
};

struct viommu_endpoint {
	struct device *dev;
	struct viommu_dev *viommu;
	struct viommu_domain *vdomain;
	struct list_head resv_regions;
};

struct viommu_request {
	struct list_head list;
	void *writeback;
	unsigned int write_offset;
	unsigned int len;
	char buf[];
};

#define VIOMMU_FAULT_RESV_MASK 0xffffff00

struct viommu_event {
	union {
		u32 head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain) \
	container_of(domain, struct viommu_domain, domain)

static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

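/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */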
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

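/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes
 *
 * Add a request to the queue. Only synchronize the queue if it's already
 * full. Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req()
 * and sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */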
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
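		/* If the queue is full, sync and retry */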
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

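/*
 * viommu_send_req_sync - Send one request and wait for reply
 *
 * @buf: pointer to a virtio_iommu_req_* structure. On success the device's
 *       reply, including the request status, is written back into @buf.
 *
 * Return 0 if the request was successfully sent and completed.
 */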
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
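		/* Fall-through (get the actual request status) */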
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

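/*
 * viommu_add_mapping - add a mapping to the domain's interval tree
 *
 * The caller is responsible for sending the corresponding MAP request to
 * the device.
 */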
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = end;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

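/*
 * viommu_del_mappings - remove mappings from the domain's interval tree
 * @vdomain: the domain
 * @iova: start of the range
 * @end: last byte of the range (inclusive)
 *
 * Return the number of unmapped bytes.
 */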
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

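		/* Trying to split a mapping? */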
		if (mapping->iova.start < iova)
			break;

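		/*
		 * Virtio-iommu doesn't allow UNMAP requests to split a
		 * mapping created with a single MAP request, so remove the
		 * full mapping.
		 */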
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

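/*
 * viommu_domain_map_identity - map the domain's whole input range with
 * identity mappings (paddr == iova), skipping the endpoint's reserved
 * regions. Used when the device doesn't offer VIRTIO_IOMMU_F_BYPASS_CONFIG.
 */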
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;

	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
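			/* No overlap */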
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}

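/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */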
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

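	/* Catch any overflow, including the unlikely end < start case */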
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

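	/* Keep the list sorted */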
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}

static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
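	/*
	 * For now, assume that properties of an endpoint that outputs
	 * multiple IDs are consistent. Only probe the first one.
	 */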
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

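/* IOMMU API */
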
static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}

static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id = (unsigned int)ret;

	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;

	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		if (virtio_has_feature(viommu->vdev,
				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
			vdomain->bypass = true;
			return 0;
		}

		ret = viommu_domain_map_identity(vdev, vdomain);
		if (ret) {
			ida_free(&viommu->domain_ids, vdomain->id);
			vdomain->viommu = NULL;
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

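	/* Free all remaining mappings */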
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
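		/*
		 * Properly initialize the domain now that we know which
		 * viommu owns it.
		 */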
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

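	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will
	 * be recreated if it gets reattached to an endpoint. Otherwise it
	 * will be freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */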
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	if (vdomain->bypass)
		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
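		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */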
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.phys_start = cpu_to_le64(paddr),
		.virt_end = cpu_to_le64(end),
		.flags = cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, end);
	else if (mapped)
		*mapped = size;

	return ret;
}

static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

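	/* Device already removed all mappings after detach. */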
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

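	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */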
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
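		/* Get additional information for this endpoint */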
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
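	/* First clear the DMA ops in case we're switching from a DMA domain */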
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static bool viommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		return false;
	}
}

static struct iommu_ops viommu_ops = {
	.capable = viommu_capable,
	.domain_alloc = viommu_domain_alloc,
	.probe_device = viommu_probe_device,
	.probe_finalize = viommu_probe_finalize,
	.release_device = viommu_release_device,
	.device_group = viommu_device_group,
	.get_resv_regions = viommu_get_resv_regions,
	.of_xlate = viommu_of_xlate,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = viommu_attach_dev,
		.map_pages = viommu_map_pages,
		.unmap_pages = viommu_unmap_pages,
		.iova_to_phys = viommu_iova_to_phys,
		.iotlb_sync = viommu_iotlb_sync,
		.free = viommu_domain_free,
	}
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL,
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

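	/* Optional features */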
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

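	/* Populate the event queue with receive buffers */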
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

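	/* Stop all virtqueues */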
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.probe = viommu_probe,
	.remove = viommu_remove,
	.config_changed = viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");