// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define VMCONFIG_MSI_REMAP	0x2
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

#define MB2_SHADOW_OFFSET	0x2000
#define MB2_SHADOW_SIZE		16
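
/*
 * Note (inferred from vmd_get_phys_offsets() below): when MB2_SHADOW_EN is
 * set, the MB2_SHADOW_SIZE bytes at MB2_SHADOW_OFFSET in MEMBAR2 hold two
 * little-endian u64 values: the host physical base addresses backing
 * MEMBAR1 and MEMBAR2, in that order.
 */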

enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),

	/*
	 * Device contains physical location shadow registers in
	 * vendor-specific capability space
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),

	/*
	 * Device may use MSI-X vector 0 for software triggering and will not
	 * be used for MSI remapping
	 */
	VMD_FEAT_OFFSET_FIRST_VECTOR		= (1 << 3),

	/*
	 * Device can bypass remapping MSI-X transactions into its MSI-X table,
	 * avoiding the requirement of a VMD MSI domain for child device
	 * interrupt handling.
	 */
	VMD_FEAT_CAN_BYPASS_MSI_REMAP		= (1 << 4),
};

static DEFINE_IDA(vmd_instance_ida);

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 * @virq:	the underlying VMD Linux interrupt number
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
	unsigned int		virq;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	void __iomem		*cfgbar;

	int			msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;
	u8			busn_start;
	u8			first_vec;
	char			*name;
	int			instance;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one more vector allocated to the VMD device.  The way it does
 * this is through the MSI message address: the destination ID field carries
 * the index of the VMD MSI-X vector rather than an APIC ID.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}
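
/*
 * Worked example (assuming the x86 MSI address format, where base_address
 * holds the 0xfee prefix in bits 31:20 and destid_0_7 occupies bits 19:12):
 * VMD vector index 3 composes an address_lo of 0xfee03000.
 */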

/*
 * Attach the vmd_irq to its parent vector's dispatch list before unmasking.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

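/*
 * Every child IRQ is given hwirq 0.  VMD never needs to look an interrupt up
 * by hwirq; the shared hardware vector is demuxed through the per-vector
 * lists in vmd_irq() instead.
 */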
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	unsigned long flags;
	int i, best;

	if (vmd->msix_count == 1 + vmd->first_vec)
		return &vmd->irqs[vmd->first_vec];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[vmd->first_vec];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	best = vmd->first_vec + 1;
	for (i = best; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;

	irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

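/*
 * Note the inverted sense of the hardware bit relative to the argument:
 * enabling MSI-X remapping *clears* VMCONFIG_MSI_REMAP, and bypassing
 * remapping sets it.
 */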
static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
{
	u16 reg;

	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
	reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
		       (reg | VMCONFIG_MSI_REMAP);
	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
}

static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	return 0;
}

static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
	/*
	 * Some production BIOS won't enable remapping between soft reboots.
	 * Ensure remapping is restored before unloading the driver.
	 */
	if (!vmd->msix_count)
		vmd_set_msi_remapping(vmd, true);

	if (vmd->irq_domain) {
		struct fwnode_handle *fn = vmd->irq_domain->fwnode;

		irq_domain_remove(vmd->irq_domain);
		irq_domain_free_fwnode(fn);
	}
}

static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	unsigned int busnr_ecam = bus->number - vmd->busn_start;
	u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg);

	if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return vmd->cfgbar + offset;
}
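
/*
 * For reference, PCIE_ECAM_OFFSET() computes the standard ECAM offset
 * (bus << 20) | (devfn << 12) | reg, so e.g. relative bus 2, device 1,
 * function 0, register 0x10 maps to offset 0x208010 within CFGBAR.
 */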

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

#ifdef CONFIG_ACPI
static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
{
	struct pci_host_bridge *bridge;
	u32 busnr, addr;

	if (pci_dev->bus->ops != &vmd_ops)
		return NULL;

	bridge = pci_find_host_bridge(pci_dev->bus);
	busnr = pci_dev->bus->number - bridge->bus->number;

	/*
	 * The address computation below is only applicable to relative bus
	 * numbers below 32.
	 */
	if (busnr > 31)
		return NULL;

	addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;

	dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
		addr);

	return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
				      false);
}
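
/*
 * Example of the _ADR encoding above (hypothetical numbers): relative bus 1,
 * device 2, function 0 gives devfn 0x10, so addr = (1 << 24) | (0x10 << 16) |
 * 0x8000FFFF = 0x8110FFFF.
 */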

static bool hook_installed;

static void vmd_acpi_begin(void)
{
	if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
		return;

	hook_installed = true;
}

static void vmd_acpi_end(void)
{
	if (!hook_installed)
		return;

	pci_acpi_clear_companion_lookup_hook();
	hook_installed = false;
}
#else
static inline void vmd_acpi_begin(void) { }
static inline void vmd_acpi_end(void) { }
#endif

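/*
 * Sketch of what follows (inferred from the code): walk every possible bridge
 * function behind the VMD and zero its window registers, PCI_IO_BASE through
 * PCI_ROM_ADDRESS1, so that stale firmware assignments do not survive into
 * the fresh bus scan.
 */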
static void vmd_domain_reset(struct vmd_dev *vmd)
{
	u16 bus, max_buses = resource_size(&vmd->resources[0]);
	u8 dev, functions, fn, hdr_type;
	char __iomem *base;

	for (bus = 0; bus < max_buses; bus++) {
		for (dev = 0; dev < 32; dev++) {
			base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
						PCI_DEVFN(dev, 0), 0);

			/*
			 * Do not mask off the multi-function bit (0x80) here;
			 * it decides how many functions to probe.
			 */
			hdr_type = readb(base + PCI_HEADER_TYPE);

			functions = (hdr_type & 0x80) ? 8 : 1;
			for (fn = 0; fn < functions; fn++) {
				base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
						PCI_DEVFN(dev, fn), 0);

				hdr_type = readb(base + PCI_HEADER_TYPE) &
						PCI_HEADER_TYPE_MASK;

				if (hdr_type != PCI_HEADER_TYPE_BRIDGE ||
				    (readw(base + PCI_CLASS_DEVICE) !=
				     PCI_CLASS_BRIDGE_PCI))
					continue;

				memset_io(base + PCI_IO_BASE, 0,
					  PCI_ROM_ADDRESS1 - PCI_IO_BASE);
			}
		}
	}
}

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number.  Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
				resource_size_t *offset1,
				resource_size_t *offset2)
{
	struct pci_dev *dev = vmd->dev;
	u64 phys1, phys2;

	if (native_hint) {
		u32 vmlock;
		int ret;

		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || PCI_POSSIBLE_ERROR(vmlock))
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
			pci_iounmap(dev, membar2);
		} else
			return 0;
	} else {
		/* Hypervisor-Emulated Vendor-Specific Capability */
		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
		u32 reg, regu;

		pci_read_config_dword(dev, pos + 4, &reg);

		/* "SHDW" */
		if (pos && reg == 0x53484457) {
			pci_read_config_dword(dev, pos + 8, &reg);
			pci_read_config_dword(dev, pos + 12, &regu);
			phys1 = (u64) regu << 32 | reg;

			pci_read_config_dword(dev, pos + 16, &reg);
			pci_read_config_dword(dev, pos + 20, &regu);
			phys2 = (u64) regu << 32 | reg;
		} else
			return 0;
	}

	*offset1 = dev->resource[VMD_MEMBAR1].start -
			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
	*offset2 = dev->resource[VMD_MEMBAR2].start -
			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);

	return 0;
}
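
/*
 * Illustration with hypothetical numbers: if the guest sees MEMBAR1 at
 * 0xe0000000 while the shadow register reports a host base of 0xd0000000,
 * then *offset1 = 0x10000000, and child BARs inside MEMBAR1 are advertised
 * to the PCI core shifted by that amount (see the pci_add_resource_offset()
 * calls in vmd_enable_domain()).
 */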

static int vmd_get_bus_number_start(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	u16 reg;

	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
	if (BUS_RESTRICT_CAP(reg)) {
		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);

		switch (BUS_RESTRICT_CFG(reg)) {
		case 0:
			vmd->busn_start = 0;
			break;
		case 1:
			vmd->busn_start = 128;
			break;
		case 2:
			vmd->busn_start = 224;
			break;
		default:
			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
				BUS_RESTRICT_CFG(reg));
			return -ENODEV;
		}
	}

	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_alloc_irqs(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	int i, err;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1,
						vmd->msix_count, PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		vmd->irqs[i].virq = pci_irq_vector(dev, i);
		err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
				       vmd_irq, IRQF_NO_THREAD,
				       vmd->name, &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Since VMD is an aperture to regular PCIe root ports, only allow it to
 * control features that the OS is allowed to control on the physical PCI bus.
 */
static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
				       struct pci_host_bridge *vmd_bridge)
{
	vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug;
	vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug;
	vmd_bridge->native_aer = root_bridge->native_aer;
	vmd_bridge->native_pme = root_bridge->native_pme;
	vmd_bridge->native_ltr = root_bridge->native_ltr;
	vmd_bridge->native_dpc = root_bridge->native_dpc;
}

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000;
	struct pci_bus *child;
	int ret;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
		if (ret)
			return ret;
	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
		if (ret)
			return ret;
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127, 128-255, or 224-255
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		ret = vmd_get_bus_number_start(vmd);
		if (ret)
			return ret;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = vmd->busn_start,
		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so we can convert it to a 32-bit resource.
	 * To be visible to the host OS, all VMD endpoints must be initially
	 * configured by platform BIOS, which includes setting up these
	 * resources.  We can assume the device is configured according to the
	 * platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_dev = vmd->dev;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	/*
	 * Currently MSI remapping must be enabled in guest passthrough mode
	 * due to some missing interrupt remapping plumbing. This is probably
	 * acceptable because the guest is usually CPU-limited and MSI
	 * remapping doesn't become a performance bottleneck.
	 */
	if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
	    offset[0] || offset[1]) {
		ret = vmd_alloc_irqs(vmd);
		if (ret)
			return ret;

		vmd_set_msi_remapping(vmd, true);

		ret = vmd_create_irq_domain(vmd);
		if (ret)
			return ret;

		/*
		 * Override the IRQ domain bus token so the domain can be
		 * distinguished from a regular PCI/MSI domain.
		 */
		irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
	} else {
		vmd_set_msi_remapping(vmd, false);
	}

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
				       &vmd_ops, sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		vmd_remove_irq_domain(vmd);
		return -ENODEV;
	}

	vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
				   to_pci_host_bridge(vmd->bus->bridge));

	vmd_attach_resources(vmd);
	if (vmd->irq_domain)
		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	else
		dev_set_msi_domain(&vmd->bus->dev,
				   dev_get_msi_domain(&vmd->dev->dev));

	vmd_acpi_begin();

	pci_scan_child_bus(vmd->bus);
	vmd_domain_reset(vmd);
	list_for_each_entry(child, &vmd->bus->children, node)
		pci_reset_bus(child->self);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	vmd_acpi_end();

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long features = (unsigned long) id->driver_data;
	struct vmd_dev *vmd;
	int err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
	if (vmd->instance < 0)
		return vmd->instance;

	vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d",
				   vmd->instance);
	if (!vmd->name) {
		err = -ENOMEM;
		goto out_release_instance;
	}

	err = pcim_enable_device(dev);
	if (err < 0)
		goto out_release_instance;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar) {
		err = -ENOMEM;
		goto out_release_instance;
	}

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
		err = -ENODEV;
		goto out_release_instance;
	}

	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
		vmd->first_vec = 1;

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, features);
	if (err)
		goto out_release_instance;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;

 out_release_instance:
	ida_simple_remove(&vmd_instance_ida, vmd->instance);
	return err;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_detach_resources(vmd);
	vmd_remove_irq_domain(vmd);
	ida_simple_remove(&vmd_instance_ida, vmd->instance);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);

	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, vmd->irqs[i].virq,
				       vmd_irq, IRQF_NO_THREAD,
				       vmd->name, &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");