#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
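
/*
 * vp_modern_map_capability - map a part of a virtio-pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability in PCI config space
 * @minlen: minimal length of the mapping
 * @align: alignment requirement for the start of the mapping
 * @start: offset within the capability region to start the mapping from
 * @size: maximal length to map
 * @len: on success, the length that was actually mapped
 * @pa: on success, the physical address of the mapping
 *
 * Returns the io address of the mapped capability, or NULL on error.
 */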
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
			 size_t minlen, u32 align, u32 start, u32 size,
			 size_t *len, resource_size_t *pa)
{
	struct pci_dev *dev = mdev->pci_dev;
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);
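
	/* Check if the BAR may have changed since we requested the region. */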
	if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
		dev_err(&dev->dev,
			"virtio_pci: bar unexpectedly changed to %u\n", bar);
		return NULL;
	}

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	else if (pa)
		*pa = pci_resource_start(dev, bar) + offset;

	return p;
}
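
/*
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the PCI device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */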
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);
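
		/* Ignore structures with reserved BAR values */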
		if (bar >= PCI_STD_NUM_BARS)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}
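
/* These offsets are part of the device ABI; the structure layout must match. */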
static inline void check_offsets(void)
{
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}
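
/*
 * vp_modern_probe - probe the modern virtio pci device, note that the
 * caller is required to enable the PCI device before calling this function.
 * @mdev: the modern virtio-pci device
 *
 * Return: 0 on success, a negative error code otherwise.
 */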
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();
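
	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */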
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
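		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as the legacy driver always did.
		 */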
		mdev->id.device = pci_dev->subsystem_device;
	} else {
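		/* Modern devices: simply use the PCI device id, but start from 0x1040. */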
		mdev->id.device = pci_dev->device - 0x1040;
	}
	mdev->id.vendor = pci_dev->subsystem_vendor;
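
	/* Check for a common config: if not there, leave for the legacy driver. */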
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}
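
	/* If common is there, these should be too... */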
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &mdev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
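
	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */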
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);

	err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	mdev->common = vp_modern_map_capability(mdev, common,
				      sizeof(struct virtio_pci_common_cfg), 4,
				      0, sizeof(struct virtio_pci_common_cfg),
				      NULL, NULL);
	if (!mdev->common)
		goto err_map_common;
	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
					     0, 1,
					     NULL, NULL);
	if (!mdev->isr)
		goto err_map_isr;
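
	/* Read notify offset multiplier from config space. */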
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &mdev->notify_offset_multiplier);
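
	/* Read notify length and offset from config space. */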
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);
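
	/* We don't know how many VQs we'll map, ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */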
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		mdev->notify_base = vp_modern_map_capability(mdev, notify,
							     2, 2,
							     0, notify_length,
							     &mdev->notify_len,
							     &mdev->notify_pa);
		if (!mdev->notify_base)
			goto err_map_notify;
	} else {
		mdev->notify_map_cap = notify;
	}
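
	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */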
	if (device) {
		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
							&mdev->device_len,
							NULL);
		if (!mdev->device)
			goto err_map_device;
	}

	return 0;

err_map_device:
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
	pci_iounmap(pci_dev, mdev->common);
err_map_common:
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
	return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
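
/*
 * vp_modern_remove - remove and cleanup the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */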
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;

	if (mdev->device)
		pci_iounmap(pci_dev, mdev->device);
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
	pci_iounmap(pci_dev, mdev->isr);
	pci_iounmap(pci_dev, mdev->common);
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);
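
/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */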
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features = vp_ioread32(&cfg->device_feature);
	vp_iowrite32(1, &cfg->device_feature_select);
	features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);
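
/*
 * vp_modern_get_driver_features - get driver features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the driver features read from the device
 */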
u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->guest_feature_select);
	features = vp_ioread32(&cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);
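
/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features to be set
 */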
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
			    u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);
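
/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from the device
 */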
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);
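
/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from the device
 */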
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);
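
/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status to set
 */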
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
			  u8 status)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
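
	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */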
	vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
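
/*
 * vp_modern_get_queue_reset - get the queue reset status
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */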
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	return vp_ioread16(&cfg->queue_reset);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
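
/*
 * vp_modern_set_queue_reset - reset the queue and wait for the reset
 * (and the subsequent disable) to complete
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */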
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	vp_iowrite16(1, &cfg->queue_reset);

	while (vp_ioread16(&cfg->queue_reset))
		msleep(1);

	while (vp_ioread16(&cfg->cfg.queue_enable))
		msleep(1);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
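
/*
 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the MSIX vector
 *
 * Returns the vector read back from the device
 */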
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
			   u16 index, u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);
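	/* Flush the write out to device */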
	return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
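
/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read back from the device
 */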
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
			    u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
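
	/* Setup the vector used for configuration events */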
	vp_iowrite16(vector, &cfg->msix_config);
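	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */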
	return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);
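
/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */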
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
			     u16 index, u64 desc_addr, u64 driver_addr,
			     u64 device_addr)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);

	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);
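
/*
 * vp_modern_set_queue_enable - enable or disable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether to enable the virtqueue
 */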
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
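
/*
 * vp_modern_get_queue_enable - enable status of a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether the virtqueue is enabled or not
 */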
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);
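
/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */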
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
			      u16 index, u16 size)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);
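
/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */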
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
			     u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
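
/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */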
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
	return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
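
/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for the virtqueue
 */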
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
					  u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_notify_off);
}
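
/*
 * vp_modern_map_vq_notify - map the notification area for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @pa: if not NULL, set to the physical address of the notification area
 *
 * Returns the io address of the notification area, or NULL on error
 */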
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
				      u16 index, resource_size_t *pa)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	if (mdev->notify_base) {
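		/* The notification must fit in the mapped area; do the
		 * math in 64 bits so the offset cannot wrap.
		 */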
		if ((u64)off * mdev->notify_offset_multiplier + 2
		    > mdev->notify_len) {
			dev_warn(&mdev->pci_dev->dev,
				 "bad notification offset %u (x %u) for queue %u > %zd",
				 off, mdev->notify_offset_multiplier,
				 index, mdev->notify_len);
			return NULL;
		}
		if (pa)
			*pa = mdev->notify_pa +
			      off * mdev->notify_offset_multiplier;
		return mdev->notify_base + off * mdev->notify_offset_multiplier;
	} else {
		return vp_modern_map_capability(mdev,
				       mdev->notify_map_cap, 2, 2,
				       off * mdev->notify_offset_multiplier, 2,
				       NULL, pa);
	}
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);

MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL");