// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)
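/*
 * Editorial sizing note, not part of the original source: each iomap entry
 * backs one mapped BAR, so with ZPCI_NR_DEVICES functions and
 * PCI_STD_NUM_BARS standard BARs the worst case is their full product; the
 * / 2 term is a heuristic assuming functions use about half their BARs on
 * average, clamped to ZPCI_IOMAP_MAX_ENTRIES.
 */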

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;
/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}
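/*
 * Editorial sketch, not part of the original source: get_zdev_by_fid()
 * returns its result with an elevated reference count, so callers are
 * expected to pair a successful lookup with zpci_zdev_put(), roughly:
 *
 *	zdev = get_zdev_by_fid(fid);
 *	if (zdev) {
 *		... use zdev ...
 *		zpci_zdev_put(zdev);
 *	}
 */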

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);
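/*
 * Editorial sketch, not part of the original source: a typical caller
 * registers the function's DMA translation table over its whole DMA range,
 * exactly as zpci_hot_reset_device() below does:
 *
 *	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 *				virt_to_phys(zdev->dma_table));
 */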

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}
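/*
 * Editorial worked example, not part of the original source: the accessors
 * above keep a len-byte config access in the most significant bytes of the
 * 64-bit PCI load/store payload. For a 2-byte read (len == 2), the value is
 * shifted right by (8 - 2) * 8 = 48 bits after the little-endian conversion,
 * leaving the 16-bit register value in *val; the store path mirrors this
 * with a left shift before the conversion.
 */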

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
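/*
 * Editorial note, not part of the original source: without MIO, __ioremap()
 * returns the passed address unchanged and pci_iomap_range_fh() below hands
 * out ZPCI_ADDR() cookies rather than real page table mappings, so iounmap()
 * only has a vmap area to tear down when the have_mio static key is enabled;
 * otherwise there is nothing to unmap.
 */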

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
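/*
 * Editorial sketch, not part of the original source: a PCI driver would use
 * these helpers in the usual way, e.g. in its probe() routine:
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	val = ioread32(base + REG_OFFSET);
 *	...
 *	pci_iounmap(pdev, base);
 *
 * REG_OFFSET is a hypothetical device register offset; the returned cookie
 * works with the ioread/iowrite accessors in both the MIO and non-MIO cases.
 */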

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}
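/*
 * Editorial sketch, not part of the original source: the two helpers above
 * are used as a pair by zpci_setup_bus_resources() and
 * zpci_cleanup_bus_resources() below, roughly:
 *
 *	entry = zpci_alloc_iomap(zdev);
 *	if (entry < 0)
 *		return entry;
 *	zdev->bars[i].map_idx = entry;
 *	...
 *	zpci_free_iomap(zdev, zdev->bars[i].map_idx);
 */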

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev,
			     struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
	zdev->has_resources = 0;
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier in zpci_alloc_domain()
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}
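/*
 * Editorial note, not part of the original source: with per-function UIDs
 * active (zpci_unique_uid) the caller passes the UID as the requested domain,
 * so a function with UID 0x2 would appear as PCI domain 0002; without unique
 * UIDs the domain is simply the lowest free bit in the zpci_domain bitmap.
 */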

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reused for the hot reset.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	int rc;

	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled don't treat it as an error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	if (zdev->dma_table)
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
					virt_to_phys(zdev->dma_table));
	else
		rc = zpci_dma_init_device(zdev);
	if (rc) {
		zpci_disable_device(zdev);
		return rc;
	}

	return 0;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);
	mutex_init(&zdev->kzdev_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure
 * occurs, the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	int rc;

	zpci_update_fh(zdev, fh);
	/* the PCI function will be scanned once function 0 appears */
	if (!zdev->zbus->bus)
		return 0;

	/*
	 * For function 0 on a multi-function bus scan whole bus as we might
	 * have to pick up existing functions waiting for it to allow
	 * creating the PCI bus
	 */
	if (zdev->devfn == 0 && zdev->zbus->multifunction)
		rc = zpci_bus_scan_bus(zdev->zbus);
	else
		rc = zpci_bus_scan_device(zdev);

	return rc;
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table) {
		rc = zpci_dma_exit_device(zdev);
		if (rc)
			return rc;
	}
	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can not be found via
 * get_zdev_by_fid() anymore but may still be accessible via existing
 * references though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);
	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * the functional state of the device after error recovery and can then decide
 * to unblock DMA re-enabling the device for operation.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}
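/*
 * Editorial sketch, not part of the original source: during error recovery
 * these helpers are plausibly used in sequence, first unblocking load/store
 * so the driver can probe the device, then clearing the error state:
 *
 *	if (zpci_reset_load_store_blocked(zdev))
 *		... treat as permanent failure ...
 *	if (zpci_clear_error_state(zdev))
 *		... treat as permanent failure ...
 */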

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}
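/*
 * Editorial note, not part of the original source: these options arrive via
 * the generic "pci=" kernel parameter, so booting with, for example,
 * "pci=nomio" clears MACHINE_FLAG_PCI_MIO and forces the classic PCI
 * instruction interface, and "pci=off" disables PCI probing entirely.
 */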

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);