0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <kvm/iodev.h>
0013
0014 #include <linux/kvm_host.h>
0015 #include <linux/slab.h>
0016 #include <linux/kvm.h>
0017
0018 #include "coalesced_mmio.h"
0019
/*
 * Recover the coalesced-MMIO device from its embedded kvm_io_device
 * ('dev' is the name of the embedded member inside
 * struct kvm_coalesced_mmio_dev).
 */
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
0024
0025 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
0026 gpa_t addr, int len)
0027 {
0028
0029
0030
0031
0032 if (len < 0)
0033 return 0;
0034 if (addr + len < addr)
0035 return 0;
0036 if (addr < dev->zone.addr)
0037 return 0;
0038 if (addr + len > dev->zone.addr + dev->zone.size)
0039 return 0;
0040 return 1;
0041 }
0042
0043 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
0044 {
0045 struct kvm_coalesced_mmio_ring *ring;
0046 unsigned avail;
0047
0048
0049
0050
0051
0052
0053
0054 ring = dev->kvm->coalesced_mmio_ring;
0055 avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
0056 if (avail == 0) {
0057
0058 return 0;
0059 }
0060
0061 return 1;
0062 }
0063
/*
 * Write handler for a coalesced zone: instead of completing the access
 * via the normal emulation path, append it to the shared ring so it can
 * be replayed in a batch later.
 *
 * Returns 0 when the write was coalesced, -EOPNOTSUPP when the access
 * is outside this device's zone or the ring is full, in which case the
 * caller falls back to ordinary I/O handling.
 */
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * Snapshot ring->last exactly once: it is re-checked against
	 * KVM_COALESCED_MMIO_MAX before being used as an array index
	 * below, and a second read could see a different (unsafe) value.
	 * NOTE(review): the ring appears to be shared with (and writable
	 * by) another party, hence READ_ONCE — confirm against the ring's
	 * mapping/ownership.
	 */
	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* Fill the slot first ... */
	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
	/* ... then publish it: the entry must be visible before the new last. */
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
0095
/*
 * Tear down a coalesced-MMIO device: unlink it from its VM's
 * coalesced_zones list and free it.
 */
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}
0104
/* Only a write handler is provided: reads are never coalesced. */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
0109
0110 int kvm_coalesced_mmio_init(struct kvm *kvm)
0111 {
0112 struct page *page;
0113
0114 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
0115 if (!page)
0116 return -ENOMEM;
0117
0118 kvm->coalesced_mmio_ring = page_address(page);
0119
0120
0121
0122
0123
0124
0125 spin_lock_init(&kvm->ring_lock);
0126 INIT_LIST_HEAD(&kvm->coalesced_zones);
0127
0128 return 0;
0129 }
0130
0131 void kvm_coalesced_mmio_free(struct kvm *kvm)
0132 {
0133 if (kvm->coalesced_mmio_ring)
0134 free_page((unsigned long)kvm->coalesced_mmio_ring);
0135 }
0136
0137 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
0138 struct kvm_coalesced_mmio_zone *zone)
0139 {
0140 int ret;
0141 struct kvm_coalesced_mmio_dev *dev;
0142
0143 if (zone->pio != 1 && zone->pio != 0)
0144 return -EINVAL;
0145
0146 dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
0147 GFP_KERNEL_ACCOUNT);
0148 if (!dev)
0149 return -ENOMEM;
0150
0151 kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
0152 dev->kvm = kvm;
0153 dev->zone = *zone;
0154
0155 mutex_lock(&kvm->slots_lock);
0156 ret = kvm_io_bus_register_dev(kvm,
0157 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
0158 zone->addr, zone->size, &dev->dev);
0159 if (ret < 0)
0160 goto out_free_dev;
0161 list_add_tail(&dev->list, &kvm->coalesced_zones);
0162 mutex_unlock(&kvm->slots_lock);
0163
0164 return 0;
0165
0166 out_free_dev:
0167 mutex_unlock(&kvm->slots_lock);
0168 kfree(dev);
0169
0170 return ret;
0171 }
0172
0173 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
0174 struct kvm_coalesced_mmio_zone *zone)
0175 {
0176 struct kvm_coalesced_mmio_dev *dev, *tmp;
0177 int r;
0178
0179 if (zone->pio != 1 && zone->pio != 0)
0180 return -EINVAL;
0181
0182 mutex_lock(&kvm->slots_lock);
0183
0184 list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
0185 if (zone->pio == dev->zone.pio &&
0186 coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
0187 r = kvm_io_bus_unregister_dev(kvm,
0188 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
0189
0190
0191
0192
0193
0194
0195
0196 if (r)
0197 break;
0198 kvm_iodevice_destructor(&dev->dev);
0199 }
0200 }
0201
0202 mutex_unlock(&kvm->slots_lock);
0203
0204
0205
0206
0207
0208 return 0;
0209 }