0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055 #define pr_fmt(fmt) "virtio-mmio: " fmt
0056
0057 #include <linux/acpi.h>
0058 #include <linux/dma-mapping.h>
0059 #include <linux/highmem.h>
0060 #include <linux/interrupt.h>
0061 #include <linux/io.h>
0062 #include <linux/list.h>
0063 #include <linux/module.h>
0064 #include <linux/platform_device.h>
0065 #include <linux/pm.h>
0066 #include <linux/slab.h>
0067 #include <linux/spinlock.h>
0068 #include <linux/virtio.h>
0069 #include <linux/virtio_config.h>
0070 #include <uapi/linux/virtio_mmio.h>
0071 #include <linux/virtio_ring.h>
0072
0073
0074
0075
0076
0077 #define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
0078
0079
0080
0081 #define to_virtio_mmio_device(_plat_dev) \
0082 container_of(_plat_dev, struct virtio_mmio_device, vdev)
0083
/* Per-instance state for one virtio-mmio transport device. */
struct virtio_mmio_device {
	struct virtio_device vdev;	/* embedded virtio device */
	struct platform_device *pdev;	/* backing platform device */

	void __iomem *base;		/* ioremapped MMIO register window */
	unsigned long version;		/* device version: 1 (legacy) or 2 */

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;		/* protects @virtqueues */
	struct list_head virtqueues;	/* list of virtio_mmio_vq_info */
};
0095
/* Bookkeeping for one active virtqueue, linked on vm_dev->virtqueues. */
struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};
0103
0104
0105
0106
0107
/* Configuration interface */

/* Read the 64-bit device feature word, upper half first. */
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	/* Select and read feature bits 32..63 */
	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	/* Select and read feature bits 0..31 */
	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}
0122
/*
 * Write the negotiated driver feature bits back to the device.
 * Returns 0 on success, -EINVAL if a version-2 device did not offer
 * VIRTIO_F_VERSION_1.
 */
static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	/* Select and write feature bits 32..63 */
	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
	       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	/* Select and write feature bits 0..31 */
	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
	       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}
0147
0148 static void vm_get(struct virtio_device *vdev, unsigned int offset,
0149 void *buf, unsigned int len)
0150 {
0151 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
0152 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
0153 u8 b;
0154 __le16 w;
0155 __le32 l;
0156
0157 if (vm_dev->version == 1) {
0158 u8 *ptr = buf;
0159 int i;
0160
0161 for (i = 0; i < len; i++)
0162 ptr[i] = readb(base + offset + i);
0163 return;
0164 }
0165
0166 switch (len) {
0167 case 1:
0168 b = readb(base + offset);
0169 memcpy(buf, &b, sizeof b);
0170 break;
0171 case 2:
0172 w = cpu_to_le16(readw(base + offset));
0173 memcpy(buf, &w, sizeof w);
0174 break;
0175 case 4:
0176 l = cpu_to_le32(readl(base + offset));
0177 memcpy(buf, &l, sizeof l);
0178 break;
0179 case 8:
0180 l = cpu_to_le32(readl(base + offset));
0181 memcpy(buf, &l, sizeof l);
0182 l = cpu_to_le32(ioread32(base + offset + sizeof l));
0183 memcpy(buf + sizeof l, &l, sizeof l);
0184 break;
0185 default:
0186 BUG();
0187 }
0188 }
0189
/*
 * Write @len bytes from @buf into device config space at @offset.
 * Mirror image of vm_get(): byte-wise for legacy devices, naturally
 * sized accesses (64-bit as two 32-bit halves, low word first) for
 * modern ones.
 */
static void vm_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}
0232
0233 static u32 vm_generation(struct virtio_device *vdev)
0234 {
0235 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
0236
0237 if (vm_dev->version == 1)
0238 return 0;
0239 else
0240 return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
0241 }
0242
0243 static u8 vm_get_status(struct virtio_device *vdev)
0244 {
0245 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
0246
0247 return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
0248 }
0249
/* Write a new (non-zero) value to the device STATUS register. */
static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0 - use vm_reset() instead. */
	BUG_ON(status == 0);

	/*
	 * NOTE(review): no explicit wmb() before this MMIO write -
	 * writel() itself is expected to order prior memory writes;
	 * matches the accessor semantics in memory-barriers.txt.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}
0264
static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}
0272
0273
0274
0275
0276
0277
/* Transport interface: kick the device for the given virtqueue. */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}
0287
0288
/*
 * Shared IRQ handler: acknowledge the interrupt, propagate a config
 * change if signalled, and dispatch vring interrupts to every active
 * virtqueue (the device does not tell us which queue fired).
 */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		/* lock protects the virtqueue list against add/del */
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}
0315
0316
0317
/*
 * Tear down a single virtqueue: unlink it from the IRQ dispatch list,
 * deactivate it in the device, free the ring and its bookkeeping.
 */
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		/* the device must report the queue as not ready now */
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}
0342
/* Tear down all virtqueues of the device and release its interrupt. */
static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
0353
/* Wait for any in-flight virtqueue callbacks (our IRQ handler) to finish. */
static void vm_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
}
0360
/*
 * Create and activate virtqueue @index: allocate the ring, program its
 * size and addresses into the device (PFN for legacy, split 64-bit
 * descriptor/avail/used addresses + READY for modern), and register it
 * for IRQ dispatch. Returns the virtqueue or an ERR_PTR.
 */
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	/* zero NUM_MAX means the queue is not available */
	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				    true, true, ctx, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
		       vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
		       vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
		       vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	/* deactivate whatever the device may have latched */
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
0472
/*
 * Set up @nvqs virtqueues: request the (shared) platform IRQ once,
 * then create each named queue via vm_setup_vq(). NULL names yield a
 * NULL vqs[] slot and consume no queue index. On any failure all
 * queues created so far are torn down.
 */
static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	/* allow the device IRQ to wake the system if DT asks for it */
	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			/* also frees the IRQ requested above */
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}
0511
0512 static const char *vm_bus_name(struct virtio_device *vdev)
0513 {
0514 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
0515
0516 return vm_dev->pdev->name;
0517 }
0518
/*
 * Look up shared memory region @id. Returns false if the device
 * reports an all-ones length, which marks a non-existent region.
 */
static bool vm_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 len, addr;

	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	/* Read the region size */
	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/* Check if region length is -1. If that's the case, the shared memory
	 * region does not exist and there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	/* Read the region base address */
	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;

	return true;
}
0548
/* virtio config ops implemented by this MMIO transport. */
static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get = vm_get,
	.set = vm_set,
	.generation = vm_generation,
	.get_status = vm_get_status,
	.set_status = vm_set_status,
	.reset = vm_reset,
	.find_vqs = vm_find_vqs,
	.del_vqs = vm_del_vqs,
	.get_features = vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name = vm_bus_name,
	.get_shm_region = vm_get_shm_region,
	.synchronize_cbs = vm_synchronize_cbs,
};
0564
0565 #ifdef CONFIG_PM_SLEEP
/* System-sleep freeze hook: delegate to the virtio core. */
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}
0572
/*
 * System-sleep restore hook: legacy devices need the guest page size
 * re-programmed before the virtio core restores device state.
 */
static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}
0582
/* PM callbacks for suspend/hibernate (freeze/thaw/restore). */
static const struct dev_pm_ops virtio_mmio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
};
0586 #endif
0587
/* Device release callback: free the devm-allocated per-device state. */
static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct platform_device *pdev = vm_dev->pdev;

	devm_kfree(&pdev->dev, vm_dev);
}
0597
0598
0599
/* Platform device: probe */

/*
 * Probe one virtio-mmio platform device: map the register window,
 * validate the magic value and version, read the device/vendor IDs,
 * configure DMA masks and register the virtio device with the core.
 */
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	unsigned long magic;
	int rc;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base))
		return PTR_ERR(vm_dev->base);

	/* Check magic value ("virt" in little-endian) */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressable as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;
}
0673
/* Platform device removal: unregister the virtio device from the core. */
static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}
0681
0682
0683
0684
0685
0686 #if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)
0687
/* Parent device grouping all command-line-instantiated virtio-mmio devices. */
static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;	/* parent registered once, lazily */
static int vm_cmdline_id;			/* next auto-assigned device id */
0694
/*
 * Module parameter setter: parse a "<size>@<base>:<irq>[:<id>]"
 * description and register a matching virtio-mmio platform device.
 */
static int vm_cmdline_set(const char *device,
			  const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'.
	 */
	if (processed < 2 || str[consumed] || irq == 0)
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			put_device(&vm_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}
0751
/*
 * device_for_each_child() callback: append one child's description
 * ("<size>@<base>:<irq>:<id>") to the buffer passed in @data.
 */
static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}
0765
/* Module parameter getter: list all registered command-line devices. */
static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			      vm_cmdline_get_device);
	return strlen(buffer) + 1;
}
0773
static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

/* exposes the "device" module parameter, readable by owner only */
device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);
0780
/* device_for_each_child() callback: unregister one cmdline child device. */
static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
0788
/* Unregister all command-line-created devices and their parent. */
static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}
0798
0799 #else
0800
/* No-op stub when command-line device support is compiled out. */
static void vm_unregister_cmdline_devices(void)
{
}
0804
0805 #endif
0806
0807
0808
/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
0814
#ifdef CONFIG_ACPI
/* ACPI _HID used by virtio-mmio devices (e.g. QEMU "virt" machines). */
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif
0822
static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#ifdef CONFIG_PM_SLEEP
		.pm	= &virtio_mmio_pm_ops,
#endif
	},
};
0835
/* Module init: register the platform driver. */
static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}
0840
/*
 * Module exit: unregister the driver first, then tear down any devices
 * that were instantiated via the command-line parameter.
 */
static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}
0846
0847 module_init(virtio_mmio_init);
0848 module_exit(virtio_mmio_exit);
0849
0850 MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
0851 MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
0852 MODULE_LICENSE("GPL");