// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

0011 #include <linux/acpi.h>
0012 #include <linux/kernel.h>
0013 #include <linux/delay.h>
0014 #include <linux/dmi.h>
0015 #include <linux/init.h>
0016 #include <linux/msi.h>
0017 #include <linux/of.h>
0018 #include <linux/pci.h>
0019 #include <linux/pm.h>
0020 #include <linux/slab.h>
0021 #include <linux/module.h>
0022 #include <linux/spinlock.h>
0023 #include <linux/string.h>
0024 #include <linux/log2.h>
0025 #include <linux/logic_pio.h>
0026 #include <linux/pm_wakeup.h>
0027 #include <linux/interrupt.h>
0028 #include <linux/device.h>
0029 #include <linux/pm_runtime.h>
0030 #include <linux/pci_hotplug.h>
0031 #include <linux/vmalloc.h>
0032 #include <asm/dma.h>
0033 #include <linux/aer.h>
0034 #include <linux/bitfield.h>
0035 #include "pci.h"
0036
0037 DEFINE_MUTEX(pci_slot_mutex);
0038
0039 const char *pci_power_names[] = {
0040 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
0041 };
0042 EXPORT_SYMBOL_GPL(pci_power_names);
0043
0044 #ifdef CONFIG_X86_32
0045 int isa_dma_bridge_buggy;
0046 EXPORT_SYMBOL(isa_dma_bridge_buggy);
0047 #endif
0048
0049 int pci_pci_problems;
0050 EXPORT_SYMBOL(pci_pci_problems);
0051
0052 unsigned int pci_pm_d3hot_delay;
0053
0054 static void pci_pme_list_scan(struct work_struct *work);
0055
0056 static LIST_HEAD(pci_pme_list);
0057 static DEFINE_MUTEX(pci_pme_list_mutex);
0058 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
0059
0060 struct pci_pme_device {
0061 struct list_head list;
0062 struct pci_dev *dev;
0063 };
0064
0065 #define PME_TIMEOUT 1000
0066
0067 static void pci_dev_d3_sleep(struct pci_dev *dev)
0068 {
0069 unsigned int delay = dev->d3hot_delay;
0070
0071 if (delay < pci_pm_d3hot_delay)
0072 delay = pci_pm_d3hot_delay;
0073
0074 if (delay)
0075 msleep(delay);
0076 }
0077
0078 bool pci_reset_supported(struct pci_dev *dev)
0079 {
0080 return dev->reset_methods[0] != 0;
0081 }
0082
0083 #ifdef CONFIG_PCI_DOMAINS
0084 int pci_domains_supported = 1;
0085 #endif
0086
0087 #define DEFAULT_CARDBUS_IO_SIZE (256)
0088 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
0089
0090 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
0091 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
0092
0093 #define DEFAULT_HOTPLUG_IO_SIZE (256)
0094 #define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024)
0095 #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024)
0096
0097 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
0103 unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
0104 unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
0105
0106 #define DEFAULT_HOTPLUG_BUS_SIZE 1
0107 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
0108
0111 #ifdef CONFIG_PCIE_BUS_TUNE_OFF
0112 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
0113 #elif defined CONFIG_PCIE_BUS_SAFE
0114 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
0115 #elif defined CONFIG_PCIE_BUS_PERFORMANCE
0116 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
0117 #elif defined CONFIG_PCIE_BUS_PEER2PEER
0118 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
0119 #else
0120 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
0121 #endif
0122
/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
0129 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
0130 u8 pci_cache_line_size;
0131
/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
0136 unsigned int pcibios_max_latency = 255;
0137
/* If set, the PCIe ARI capability will not be used. */
0139 static bool pcie_ari_disabled;
0140
/* If set, the PCIe ATS capability will not be used. */
0142 static bool pcie_ats_disabled;
0143
/* If set, the PCI config space of each device is printed during boot. */
0145 bool pci_early_dump;
0146
0147 bool pci_ats_disabled(void)
0148 {
0149 return pcie_ats_disabled;
0150 }
0151 EXPORT_SYMBOL_GPL(pci_ats_disabled);
0152
/* Disable bridge_d3 for all PCIe ports */
0154 static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
0156 static bool pci_bridge_d3_force;
0157
0158 static int __init pcie_port_pm_setup(char *str)
0159 {
0160 if (!strcmp(str, "off"))
0161 pci_bridge_d3_disable = true;
0162 else if (!strcmp(str, "force"))
0163 pci_bridge_d3_force = true;
0164 return 1;
0165 }
0166 __setup("pcie_port_pm=", pcie_port_pm_setup);
0167
/* Time to wait after a reset for device to become responsive */
0169 #define PCIE_RESET_READY_POLL_MS 60000
0170
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
0178 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
0179 {
0180 struct pci_bus *tmp;
0181 unsigned char max, n;
0182
0183 max = bus->busn_res.end;
0184 list_for_each_entry(tmp, &bus->children, node) {
0185 n = pci_bus_max_busnr(tmp);
0186 if (n > max)
0187 max = n;
0188 }
0189 return max;
0190 }
0191 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
0192
/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
0199 int pci_status_get_and_clear_errors(struct pci_dev *pdev)
0200 {
0201 u16 status;
0202 int ret;
0203
0204 ret = pci_read_config_word(pdev, PCI_STATUS, &status);
0205 if (ret != PCIBIOS_SUCCESSFUL)
0206 return -EIO;
0207
0208 status &= PCI_STATUS_ERROR_BITS;
0209 if (status)
0210 pci_write_config_word(pdev, PCI_STATUS, status);
0211
0212 return status;
0213 }
0214 EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
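
/*
 * Illustrative use (hypothetical driver code, not part of this file): an
 * error handler can log and acknowledge PCI_STATUS error bits in one step.
 * A minimal sketch; foo_handle_error() is an assumed callback name:
 *
 *	static void foo_handle_error(struct pci_dev *pdev)
 *	{
 *		int status = pci_status_get_and_clear_errors(pdev);
 *
 *		if (status > 0 && (status & PCI_STATUS_REC_MASTER_ABORT))
 *			pci_warn(pdev, "master abort observed\n");
 *	}
 */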
0215
0216 #ifdef CONFIG_HAS_IOMEM
0217 static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
0218 bool write_combine)
0219 {
0220 struct resource *res = &pdev->resource[bar];
0221 resource_size_t start = res->start;
0222 resource_size_t size = resource_size(res);
0223
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
0227 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
0228 pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
0229 return NULL;
0230 }
0231
0232 if (write_combine)
0233 return ioremap_wc(start, size);
0234
0235 return ioremap(start, size);
0236 }
0237
0238 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
0239 {
0240 return __pci_ioremap_resource(pdev, bar, false);
0241 }
0242 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
0243
0244 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
0245 {
0246 return __pci_ioremap_resource(pdev, bar, true);
0247 }
0248 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
0249 #endif
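
/*
 * Illustrative use (hypothetical, not part of this file): mapping a
 * device's register BAR during probe.  A minimal sketch, assuming the
 * registers live in BAR 0 and an "enable" register sits at offset 0x10:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	...
 *	iounmap(regs);
 */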
0250
/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
0270 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
0271 const char **endptr)
0272 {
0273 int ret;
0274 unsigned int seg, bus, slot, func;
0275 char *wpath, *p;
0276 char end;
0277
0278 *endptr = strchrnul(path, ';');
0279
0280 wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
0281 if (!wpath)
0282 return -ENOMEM;
0283
0284 while (1) {
0285 p = strrchr(wpath, '/');
0286 if (!p)
0287 break;
0288 ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
0289 if (ret != 2) {
0290 ret = -EINVAL;
0291 goto free_and_exit;
0292 }
0293
0294 if (dev->devfn != PCI_DEVFN(slot, func)) {
0295 ret = 0;
0296 goto free_and_exit;
0297 }
0298
		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
0305 dev = pci_upstream_bridge(dev);
0306 if (!dev) {
0307 ret = 0;
0308 goto free_and_exit;
0309 }
0310
0311 *p = 0;
0312 }
0313
0314 ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
0315 &func, &end);
0316 if (ret != 4) {
0317 seg = 0;
0318 ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
0319 if (ret != 3) {
0320 ret = -EINVAL;
0321 goto free_and_exit;
0322 }
0323 }
0324
0325 ret = (seg == pci_domain_nr(dev->bus) &&
0326 bus == dev->bus->number &&
0327 dev->devfn == PCI_DEVFN(slot, func));
0328
0329 free_and_exit:
0330 kfree(wpath);
0331 return ret;
0332 }
0333
/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which may
 * change if new hardware is inserted or firmware changes; if the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be
 * used to address the specific device.  The path for a device can be
 * determined through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration space
 * which may match multiple devices in the system.  A value of 0 for any
 * field will match all devices.  (Note: this differs from in-kernel code
 * that uses PCI_ANY_ID, which is ~0; this is for legacy reasons and
 * convenience so users don't have to specify FFFFFFFFs on the command
 * line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
0364 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
0365 const char **endptr)
0366 {
0367 int ret;
0368 int count;
0369 unsigned short vendor, device, subsystem_vendor, subsystem_device;
0370
0371 if (strncmp(p, "pci:", 4) == 0) {
0372
0373 p += 4;
0374 ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
0375 &subsystem_vendor, &subsystem_device, &count);
0376 if (ret != 4) {
0377 ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
0378 if (ret != 2)
0379 return -EINVAL;
0380
0381 subsystem_vendor = 0;
0382 subsystem_device = 0;
0383 }
0384
0385 p += count;
0386
0387 if ((!vendor || vendor == dev->vendor) &&
0388 (!device || device == dev->device) &&
0389 (!subsystem_vendor ||
0390 subsystem_vendor == dev->subsystem_vendor) &&
0391 (!subsystem_device ||
0392 subsystem_device == dev->subsystem_device))
0393 goto found;
0394 } else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
0399 ret = pci_dev_str_match_path(dev, p, &p);
0400 if (ret < 0)
0401 return ret;
0402 else if (ret)
0403 goto found;
0404 }
0405
0406 *endptr = p;
0407 return 0;
0408
0409 found:
0410 *endptr = p;
0411 return 1;
0412 }
0413
0414 static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
0415 u8 pos, int cap, int *ttl)
0416 {
0417 u8 id;
0418 u16 ent;
0419
0420 pci_bus_read_config_byte(bus, devfn, pos, &pos);
0421
0422 while ((*ttl)--) {
0423 if (pos < 0x40)
0424 break;
0425 pos &= ~3;
0426 pci_bus_read_config_word(bus, devfn, pos, &ent);
0427
0428 id = ent & 0xff;
0429 if (id == 0xff)
0430 break;
0431 if (id == cap)
0432 return pos;
0433 pos = (ent >> 8);
0434 }
0435 return 0;
0436 }
0437
0438 static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
0439 u8 pos, int cap)
0440 {
0441 int ttl = PCI_FIND_CAP_TTL;
0442
0443 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
0444 }
0445
0446 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
0447 {
0448 return __pci_find_next_cap(dev->bus, dev->devfn,
0449 pos + PCI_CAP_LIST_NEXT, cap);
0450 }
0451 EXPORT_SYMBOL_GPL(pci_find_next_capability);
0452
0453 static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
0454 unsigned int devfn, u8 hdr_type)
0455 {
0456 u16 status;
0457
0458 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
0459 if (!(status & PCI_STATUS_CAP_LIST))
0460 return 0;
0461
0462 switch (hdr_type) {
0463 case PCI_HEADER_TYPE_NORMAL:
0464 case PCI_HEADER_TYPE_BRIDGE:
0465 return PCI_CAPABILITY_LIST;
0466 case PCI_HEADER_TYPE_CARDBUS:
0467 return PCI_CB_CAPABILITY_LIST;
0468 }
0469
0470 return 0;
0471 }
0472
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
0492 u8 pci_find_capability(struct pci_dev *dev, int cap)
0493 {
0494 u8 pos;
0495
0496 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
0497 if (pos)
0498 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
0499
0500 return pos;
0501 }
0502 EXPORT_SYMBOL(pci_find_capability);
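
/*
 * Illustrative use (hypothetical): locate the Power Management capability
 * before touching its registers.  A minimal sketch:
 *
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */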
0503
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
0517 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
0518 {
0519 u8 hdr_type, pos;
0520
0521 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
0522
0523 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
0524 if (pos)
0525 pos = __pci_find_next_cap(bus, devfn, pos, cap);
0526
0527 return pos;
0528 }
0529 EXPORT_SYMBOL(pci_bus_find_capability);
0530
/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
0542 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
0543 {
0544 u32 header;
0545 int ttl;
0546 u16 pos = PCI_CFG_SPACE_SIZE;
0547
	/* minimum 8 bytes per capability */
0549 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
0550
0551 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
0552 return 0;
0553
0554 if (start)
0555 pos = start;
0556
0557 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
0558 return 0;
0559

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
0564 if (header == 0)
0565 return 0;
0566
0567 while (ttl-- > 0) {
0568 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
0569 return pos;
0570
0571 pos = PCI_EXT_CAP_NEXT(header);
0572 if (pos < PCI_CFG_SPACE_SIZE)
0573 break;
0574
0575 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
0576 break;
0577 }
0578
0579 return 0;
0580 }
0581 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
0582
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
0597 u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
0598 {
0599 return pci_find_next_ext_capability(dev, 0, cap);
0600 }
0601 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
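
/*
 * Illustrative use (hypothetical): check whether a device implements
 * Advanced Error Reporting by looking up the AER extended capability.
 * A minimal sketch:
 *
 *	u32 sta;
 *	u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &sta);
 */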
0602
/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
0612 u64 pci_get_dsn(struct pci_dev *dev)
0613 {
0614 u32 dword;
0615 u64 dsn;
0616 int pos;
0617
0618 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
0619 if (!pos)
0620 return 0;
0621
	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
0627 pos += 4;
0628 pci_read_config_dword(dev, pos, &dword);
0629 dsn = (u64)dword;
0630 pci_read_config_dword(dev, pos + 4, &dword);
0631 dsn |= ((u64)dword) << 32;
0632
0633 return dsn;
0634 }
0635 EXPORT_SYMBOL_GPL(pci_get_dsn);
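
/*
 * Illustrative use (hypothetical): log a device's serial number, e.g. to
 * correlate devices across reboots; zero means no DSN capability:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */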
0636
0637 static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
0638 {
0639 int rc, ttl = PCI_FIND_CAP_TTL;
0640 u8 cap, mask;
0641
0642 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
0643 mask = HT_3BIT_CAP_MASK;
0644 else
0645 mask = HT_5BIT_CAP_MASK;
0646
0647 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
0648 PCI_CAP_ID_HT, &ttl);
0649 while (pos) {
0650 rc = pci_read_config_byte(dev, pos + 3, &cap);
0651 if (rc != PCIBIOS_SUCCESSFUL)
0652 return 0;
0653
0654 if ((cap & mask) == ht_cap)
0655 return pos;
0656
0657 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
0658 pos + PCI_CAP_LIST_NEXT,
0659 PCI_CAP_ID_HT, &ttl);
0660 }
0661
0662 return 0;
0663 }
0664
/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
0678 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
0679 {
0680 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
0681 }
0682 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
0683
/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
0695 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
0696 {
0697 u8 pos;
0698
0699 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
0700 if (pos)
0701 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
0702
0703 return pos;
0704 }
0705 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
0706
/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
0717 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
0718 {
0719 u16 vsec = 0;
0720 u32 header;
0721
0722 if (vendor != dev->vendor)
0723 return 0;
0724
0725 while ((vsec = pci_find_next_ext_capability(dev, vsec,
0726 PCI_EXT_CAP_ID_VNDR))) {
0727 if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
0728 &header) == PCIBIOS_SUCCESSFUL &&
0729 PCI_VNDR_HEADER_ID(header) == cap)
0730 return vsec;
0731 }
0732
0733 return 0;
0734 }
0735 EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
0736
/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
0746 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
0747 {
0748 int pos;
0749
0750 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
0751 if (!pos)
0752 return 0;
0753
0754 while (pos) {
0755 u16 v, id;
0756
0757 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
0758 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
0759 if (vendor == v && dvsec == id)
0760 return pos;
0761
0762 pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
0763 }
0764
0765 return 0;
0766 }
0767 EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
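
/*
 * Illustrative use (hypothetical vendor and DVSEC IDs): locate a
 * Designated Vendor-Specific Extended Capability before reading the
 * vendor-defined registers that follow its header.  A minimal sketch:
 *
 *	u32 val;
 *	u16 pos = pci_find_dvsec_capability(pdev, 0x8086, 0x23);
 *
 *	if (pos)
 *		pci_read_config_dword(pdev, pos + 0x8, &val);
 */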
0768
/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
0778 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
0779 struct resource *res)
0780 {
0781 const struct pci_bus *bus = dev->bus;
0782 struct resource *r;
0783 int i;
0784
0785 pci_bus_for_each_resource(bus, r, i) {
0786 if (!r)
0787 continue;
0788 if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
0794 if (r->flags & IORESOURCE_PREFETCH &&
0795 !(res->flags & IORESOURCE_PREFETCH))
0796 return NULL;
0797
			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
0806 return r;
0807 }
0808 }
0809 return NULL;
0810 }
0811 EXPORT_SYMBOL(pci_find_parent_resource);
0812
/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
0822 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
0823 {
0824 int i;
0825
0826 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
0827 struct resource *r = &dev->resource[i];
0828
0829 if (r->start && resource_contains(r, res))
0830 return r;
0831 }
0832
0833 return NULL;
0834 }
0835 EXPORT_SYMBOL(pci_find_resource);
0836
/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
0845 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
0846 {
0847 int i;
0848

	/* Wait for Transaction Pending bit clean */
0850 for (i = 0; i < 4; i++) {
0851 u16 status;
0852 if (i)
0853 msleep((1 << (i - 1)) * 100);
0854
0855 pci_read_config_word(dev, pos, &status);
0856 if (!(status & mask))
0857 return 1;
0858 }
0859
0860 return 0;
0861 }
0862
0863 static int pci_acs_enable;
0864
/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
0868 void pci_request_acs(void)
0869 {
0870 pci_acs_enable = 1;
0871 }
0872
0873 static const char *disable_acs_redir_param;
0874
/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
0881 static void pci_disable_acs_redir(struct pci_dev *dev)
0882 {
0883 int ret = 0;
0884 const char *p;
0885 int pos;
0886 u16 ctrl;
0887
0888 if (!disable_acs_redir_param)
0889 return;
0890
0891 p = disable_acs_redir_param;
0892 while (*p) {
0893 ret = pci_dev_str_match(dev, p, &p);
0894 if (ret < 0) {
0895 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
0896 disable_acs_redir_param);
0897
0898 break;
0899 } else if (ret == 1) {
0900
0901 break;
0902 }
0903
0904 if (*p != ';' && *p != ',') {
0905
0906 break;
0907 }
0908 p++;
0909 }
0910
0911 if (ret != 1)
0912 return;
0913
0914 if (!pci_dev_specific_disable_acs_redir(dev))
0915 return;
0916
0917 pos = dev->acs_cap;
0918 if (!pos) {
0919 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
0920 return;
0921 }
0922
0923 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
0924
	/* Disable P2P Request Redirect, Completion Redirect and Egress Control */
0926 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
0927
0928 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
0929
0930 pci_info(dev, "disabled ACS redirect\n");
0931 }
0932
/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
0937 static void pci_std_enable_acs(struct pci_dev *dev)
0938 {
0939 int pos;
0940 u16 cap;
0941 u16 ctrl;
0942
0943 pos = dev->acs_cap;
0944 if (!pos)
0945 return;
0946
0947 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
0948 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
0949
	/* Source Validation */
0951 ctrl |= (cap & PCI_ACS_SV);
0952
	/* P2P Request Redirect */
0954 ctrl |= (cap & PCI_ACS_RR);
0955
	/* P2P Completion Redirect */
0957 ctrl |= (cap & PCI_ACS_CR);
0958
	/* Upstream Forwarding */
0960 ctrl |= (cap & PCI_ACS_UF);
0961
	/* Enable Translation Blocking for external devices and noats */
0963 if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
0964 ctrl |= (cap & PCI_ACS_TB);
0965
0966 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
0967 }
0968
0969
/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
0973 static void pci_enable_acs(struct pci_dev *dev)
0974 {
0975 if (!pci_acs_enable)
0976 goto disable_acs_redir;
0977
0978 if (!pci_dev_specific_enable_acs(dev))
0979 goto disable_acs_redir;
0980
0981 pci_std_enable_acs(dev);
0982
0983 disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
0991 pci_disable_acs_redir(dev);
0992 }
0993
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
1001 static void pci_restore_bars(struct pci_dev *dev)
1002 {
1003 int i;
1004
1005 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
1006 pci_update_resource(dev, i);
1007 }
1008
1009 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
1010 {
1011 if (pci_use_mid_pm())
1012 return true;
1013
1014 return acpi_pci_power_manageable(dev);
1015 }
1016
1017 static inline int platform_pci_set_power_state(struct pci_dev *dev,
1018 pci_power_t t)
1019 {
1020 if (pci_use_mid_pm())
1021 return mid_pci_set_power_state(dev, t);
1022
1023 return acpi_pci_set_power_state(dev, t);
1024 }
1025
1026 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
1027 {
1028 if (pci_use_mid_pm())
1029 return mid_pci_get_power_state(dev);
1030
1031 return acpi_pci_get_power_state(dev);
1032 }
1033
1034 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
1035 {
1036 if (!pci_use_mid_pm())
1037 acpi_pci_refresh_power_state(dev);
1038 }
1039
1040 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
1041 {
1042 if (pci_use_mid_pm())
1043 return PCI_POWER_ERROR;
1044
1045 return acpi_pci_choose_state(dev);
1046 }
1047
1048 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1049 {
1050 if (pci_use_mid_pm())
1051 return PCI_POWER_ERROR;
1052
1053 return acpi_pci_wakeup(dev, enable);
1054 }
1055
1056 static inline bool platform_pci_need_resume(struct pci_dev *dev)
1057 {
1058 if (pci_use_mid_pm())
1059 return false;
1060
1061 return acpi_pci_need_resume(dev);
1062 }
1063
1064 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1065 {
1066 if (pci_use_mid_pm())
1067 return false;
1068
1069 return acpi_pci_bridge_d3(dev);
1070 }
1071
1072
/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which is inaccessible in
 * D3cold.  The platform firmware is therefore queried first to detect
 * accessibility of the register.  In case the platform firmware reports an
 * incorrect state or the device isn't power manageable by the platform at
 * all, we try to detect D3cold by testing accessibility of the PCI ID.
 */
1084 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1085 {
1086 if (platform_pci_get_power_state(dev) == PCI_D3cold) {
1087 dev->current_state = PCI_D3cold;
1088 } else if (dev->pm_cap) {
1089 u16 pmcsr;
1090
1091 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1092 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1093 dev->current_state = PCI_D3cold;
1094 return;
1095 }
1096 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1097 } else {
1098 dev->current_state = state;
1099 }
1100 }
1101
/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
1109 void pci_refresh_power_state(struct pci_dev *dev)
1110 {
1111 platform_pci_refresh_power_state(dev);
1112 pci_update_current_state(dev, dev->current_state);
1113 }
1114
/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
1120 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1121 {
1122 int error;
1123
1124 error = platform_pci_set_power_state(dev, state);
1125 if (!error)
1126 pci_update_current_state(dev, state);
1127 else if (!dev->pm_cap)
1128 dev->current_state = PCI_D0;
1129
1130 return error;
1131 }
1132 EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1133
1134 static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1135 {
1136 pm_request_resume(&pci_dev->dev);
1137 return 0;
1138 }
1139
/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
1144 void pci_resume_bus(struct pci_bus *bus)
1145 {
1146 if (bus)
1147 pci_walk_bus(bus, pci_resume_one, NULL);
1148 }
1149
1150 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1151 {
1152 int delay = 1;
1153 u32 id;
1154
	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
	 * the read (except when CRS SV is enabled and the read was for the
	 * Vendor ID; in that case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
1167 pci_read_config_dword(dev, PCI_COMMAND, &id);
1168 while (PCI_POSSIBLE_ERROR(id)) {
1169 if (delay > timeout) {
1170 pci_warn(dev, "not ready %dms after %s; giving up\n",
1171 delay - 1, reset_type);
1172 return -ENOTTY;
1173 }
1174
1175 if (delay > 1000)
1176 pci_info(dev, "not ready %dms after %s; waiting\n",
1177 delay - 1, reset_type);
1178
1179 msleep(delay);
1180 delay *= 2;
1181 pci_read_config_dword(dev, PCI_COMMAND, &id);
1182 }
1183
1184 if (delay > 1000)
1185 pci_info(dev, "ready %dms after %s\n", delay - 1,
1186 reset_type);
1187
1188 return 0;
1189 }
1190
/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 */
1198 int pci_power_up(struct pci_dev *dev)
1199 {
1200 bool need_restore;
1201 pci_power_t state;
1202 u16 pmcsr;
1203
1204 platform_pci_set_power_state(dev, PCI_D0);
1205
1206 if (!dev->pm_cap) {
1207 state = platform_pci_get_power_state(dev);
1208 if (state == PCI_UNKNOWN)
1209 dev->current_state = PCI_D0;
1210 else
1211 dev->current_state = state;
1212
1213 if (state == PCI_D0)
1214 return 0;
1215
1216 return -EIO;
1217 }
1218
1219 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1220 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1221 pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1222 pci_power_name(dev->current_state));
1223 dev->current_state = PCI_D3cold;
1224 return -EIO;
1225 }
1226
1227 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1228
1229 need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1230 !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1231
1232 if (state == PCI_D0)
1233 goto end;
1234
	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
1239 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1240
	/* Mandatory transition delays; see PCI PM 1.2. */
1242 if (state == PCI_D3hot)
1243 pci_dev_d3_sleep(dev);
1244 else if (state == PCI_D2)
1245 udelay(PCI_PM_D2_DELAY);
1246
1247 end:
1248 dev->current_state = PCI_D0;
1249 if (need_restore)
1250 return 1;
1251
1252 return 0;
1253 }
1254
/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change and restore its BARs if they might be lost.
 *
 * If pci_restore_state() is going to be called right after a power state
 * change to D0, it is more efficient to use pci_power_up() directly instead
 * of this function.
 */
1267 static int pci_set_full_power_state(struct pci_dev *dev)
1268 {
1269 u16 pmcsr;
1270 int ret;
1271
1272 ret = pci_power_up(dev);
1273 if (ret < 0)
1274 return ret;
1275
1276 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1277 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1278 if (dev->current_state != PCI_D0) {
1279 pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1280 pci_power_name(dev->current_state));
1281 } else if (ret > 0) {
		/*
		 * According to Section 5.4.1 of the PCI Bus Power Management
		 * Interface Specification r1.2, a device transitioning from
		 * D3hot to D0 may perform an internal reset and come up in
		 * "D0 Uninitialized" rather than "D0 Initialized", in which
		 * case its BARs and other configuration are lost.
		 *
		 * pci_power_up() returns 1 when this may have happened (the
		 * device was in D3hot/D3cold and NO_SOFT_RESET is not set),
		 * so restore the BARs here to make the device usable again.
		 */
1295 pci_restore_bars(dev);
1296 }
1297
1298 return 0;
1299 }
1300
/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
1306 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1307 {
1308 pci_power_t state = *(pci_power_t *)data;
1309
1310 dev->current_state = state;
1311 return 0;
1312 }
1313
/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
1319 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1320 {
1321 if (bus)
1322 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1323 }
1324
/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device's power state has been successfully changed.
 */
1339 static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
1340 {
1341 u16 pmcsr;
1342
1343 if (!dev->pm_cap)
1344 return -EIO;
1345
	/*
	 * Validate transition: We can enter D0 from any state, but if we're
	 * already in a low-power state, we can only go deeper.  E.g., we can
	 * go from D1 to D3, but we can't go directly from D3 to D1; we'd have
	 * to go from D3 to D0, then to D1.
	 */
1352 if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1353 pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1354 pci_power_name(dev->current_state),
1355 pci_power_name(state));
1356 return -EINVAL;
1357 }
1358
	/* Check if this device supports the desired state */
1360 if ((state == PCI_D1 && !dev->d1_support)
1361 || (state == PCI_D2 && !dev->d2_support))
1362 return -EIO;
1363
1364 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1365 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1366 pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1367 pci_power_name(dev->current_state),
1368 pci_power_name(state));
1369 dev->current_state = PCI_D3cold;
1370 return -EIO;
1371 }
1372
1373 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1374 pmcsr |= state;
1375
	/* Enter specified state */
1377 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1378
	/* Mandatory power management transition delays; see PCI PM 1.2. */
1380 if (state == PCI_D3hot)
1381 pci_dev_d3_sleep(dev);
1382 else if (state == PCI_D2)
1383 udelay(PCI_PM_D2_DELAY);
1384
1385 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1386 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1387 if (dev->current_state != state)
1388 pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1389 pci_power_name(dev->current_state),
1390 pci_power_name(state));
1391
1392 return 0;
1393 }
1394
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
1412 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1413 {
1414 int error;
1415
	/* Bound the state we're entering */
1417 if (state > PCI_D3cold)
1418 state = PCI_D3cold;
1419 else if (state < PCI_D0)
1420 state = PCI_D0;
1421 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
1429 return 0;
1430
	/* Check if we're already there */
1432 if (dev->current_state == state)
1433 return 0;
1434
1435 if (state == PCI_D0)
1436 return pci_set_full_power_state(dev);
1437
	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
1442 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1443 return 0;
1444
1445 if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
1450 error = pci_set_low_power_state(dev, PCI_D3hot);
1451
1452 if (pci_platform_power_transition(dev, PCI_D3cold))
1453 return error;
1454
		/* Powering off a bridge may power off the whole hierarchy */
1456 if (dev->current_state == PCI_D3cold)
1457 pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1458 } else {
1459 error = pci_set_low_power_state(dev, state);
1460
1461 if (pci_platform_power_transition(dev, state))
1462 return error;
1463 }
1464
1465 return 0;
1466 }
1467 EXPORT_SYMBOL(pci_set_power_state);
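
/*
 * Illustrative use (hypothetical driver suspend/resume path, not part of
 * this file).  A minimal sketch:
 *
 *	suspend:
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *
 *	resume:
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 */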
1468
1469 #define PCI_EXP_SAVE_REGS 7
1470
1471 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1472 u16 cap, bool extended)
1473 {
1474 struct pci_cap_saved_state *tmp;
1475
1476 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1477 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1478 return tmp;
1479 }
1480 return NULL;
1481 }
1482
1483 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1484 {
1485 return _pci_find_saved_cap(dev, cap, false);
1486 }
1487
1488 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1489 {
1490 return _pci_find_saved_cap(dev, cap, true);
1491 }
1492
1493 static int pci_save_pcie_state(struct pci_dev *dev)
1494 {
1495 int i = 0;
1496 struct pci_cap_saved_state *save_state;
1497 u16 *cap;
1498
1499 if (!pci_is_pcie(dev))
1500 return 0;
1501
1502 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1503 if (!save_state) {
1504 pci_err(dev, "buffer not found in %s\n", __func__);
1505 return -ENOMEM;
1506 }
1507
1508 cap = (u16 *)&save_state->cap.data[0];
1509 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1510 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1511 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1512 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1513 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1514 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1515 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1516
1517 return 0;
1518 }
1519
1520 void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
1521 {
1522 #ifdef CONFIG_PCIEASPM
1523 struct pci_dev *bridge;
1524 u32 ctl;
1525
1526 bridge = pci_upstream_bridge(dev);
1527 if (bridge && bridge->ltr_path) {
1528 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1529 if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1530 pci_dbg(bridge, "re-enabling LTR\n");
1531 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1532 PCI_EXP_DEVCTL2_LTR_EN);
1533 }
1534 }
1535 #endif
1536 }
1537
1538 static void pci_restore_pcie_state(struct pci_dev *dev)
1539 {
1540 int i = 0;
1541 struct pci_cap_saved_state *save_state;
1542 u16 *cap;
1543
1544 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1545 if (!save_state)
1546 return;
1547
	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
1553 pci_bridge_reconfigure_ltr(dev);
1554
1555 cap = (u16 *)&save_state->cap.data[0];
1556 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1557 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1558 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1559 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1560 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1561 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1562 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1563 }
1564
1565 static int pci_save_pcix_state(struct pci_dev *dev)
1566 {
1567 int pos;
1568 struct pci_cap_saved_state *save_state;
1569
1570 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1571 if (!pos)
1572 return 0;
1573
1574 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1575 if (!save_state) {
1576 pci_err(dev, "buffer not found in %s\n", __func__);
1577 return -ENOMEM;
1578 }
1579
1580 pci_read_config_word(dev, pos + PCI_X_CMD,
1581 (u16 *)save_state->cap.data);
1582
1583 return 0;
1584 }
1585
1586 static void pci_restore_pcix_state(struct pci_dev *dev)
1587 {
1588 int i = 0, pos;
1589 struct pci_cap_saved_state *save_state;
1590 u16 *cap;
1591
1592 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1593 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1594 if (!save_state || !pos)
1595 return;
1596 cap = (u16 *)&save_state->cap.data[0];
1597
1598 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1599 }
1600
1601 static void pci_save_ltr_state(struct pci_dev *dev)
1602 {
1603 int ltr;
1604 struct pci_cap_saved_state *save_state;
1605 u32 *cap;
1606
1607 if (!pci_is_pcie(dev))
1608 return;
1609
1610 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1611 if (!ltr)
1612 return;
1613
1614 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1615 if (!save_state) {
1616 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1617 return;
1618 }
1619
	/* Some broken devices only support dword access to LTR */
1621 cap = &save_state->cap.data[0];
1622 pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
1623 }
1624
1625 static void pci_restore_ltr_state(struct pci_dev *dev)
1626 {
1627 struct pci_cap_saved_state *save_state;
1628 int ltr;
1629 u32 *cap;
1630
1631 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1632 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1633 if (!save_state || !ltr)
1634 return;
1635
	/* Some broken devices only support dword access to LTR */
1637 cap = &save_state->cap.data[0];
1638 pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
1639 }
1640
/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
1646 int pci_save_state(struct pci_dev *dev)
1647 {
1648 int i;
1649
1650 for (i = 0; i < 16; i++) {
1651 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1652 pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1653 i * 4, dev->saved_config_space[i]);
1654 }
1655 dev->state_saved = true;
1656
1657 i = pci_save_pcie_state(dev);
1658 if (i != 0)
1659 return i;
1660
1661 i = pci_save_pcix_state(dev);
1662 if (i != 0)
1663 return i;
1664
1665 pci_save_ltr_state(dev);
1666 pci_save_dpc_state(dev);
1667 pci_save_aer_state(dev);
1668 pci_save_ptm_state(dev);
1669 return pci_save_vc_state(dev);
1670 }
1671 EXPORT_SYMBOL(pci_save_state);
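
/*
 * Illustrative use (hypothetical): drivers commonly bracket an operation
 * that clobbers config space, such as a device reset, with a save/restore
 * pair.  A minimal sketch:
 *
 *	pci_save_state(pdev);
 *	... reset the device ...
 *	pci_restore_state(pdev);
 */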
1672
1673 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1674 u32 saved_val, int retry, bool force)
1675 {
1676 u32 val;
1677
1678 pci_read_config_dword(pdev, offset, &val);
1679 if (!force && val == saved_val)
1680 return;
1681
1682 for (;;) {
1683 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1684 offset, val, saved_val);
1685 pci_write_config_dword(pdev, offset, saved_val);
1686 if (retry-- <= 0)
1687 return;
1688
1689 pci_read_config_dword(pdev, offset, &val);
1690 if (val == saved_val)
1691 return;
1692
1693 mdelay(1);
1694 }
1695 }
1696
1697 static void pci_restore_config_space_range(struct pci_dev *pdev,
1698 int start, int end, int retry,
1699 bool force)
1700 {
1701 int index;
1702
1703 for (index = end; index >= start; index--)
1704 pci_restore_config_dword(pdev, 4 * index,
1705 pdev->saved_config_space[index],
1706 retry, force);
1707 }
1708
1709 static void pci_restore_config_space(struct pci_dev *pdev)
1710 {
1711 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1712 pci_restore_config_space_range(pdev, 10, 15, 0, false);

		/* Restore BARs before the command register. */
1714 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1715 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1716 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1717 pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
1724 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1725 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1726 } else {
1727 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1728 }
1729 }
1730
1731 static void pci_restore_rebar_state(struct pci_dev *pdev)
1732 {
1733 unsigned int pos, nbars, i;
1734 u32 ctrl;
1735
1736 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1737 if (!pos)
1738 return;
1739
1740 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1741 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1742 PCI_REBAR_CTRL_NBAR_SHIFT;
1743
1744 for (i = 0; i < nbars; i++, pos += 8) {
1745 struct resource *res;
1746 int bar_idx, size;
1747
1748 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1749 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1750 res = pdev->resource + bar_idx;
1751 size = pci_rebar_bytes_to_size(resource_size(res));
1752 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1753 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1754 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1755 }
1756 }
1757
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
1762 void pci_restore_state(struct pci_dev *dev)
1763 {
1764 if (!dev->state_saved)
1765 return;
1766
	/*
	 * Restore max latencies (in the LTR capability) before enabling the
	 * LTR itself (in the PCIe capability).
	 */
1771 pci_restore_ltr_state(dev);
1772
1773 pci_restore_pcie_state(dev);
1774 pci_restore_pasid_state(dev);
1775 pci_restore_pri_state(dev);
1776 pci_restore_ats_state(dev);
1777 pci_restore_vc_state(dev);
1778 pci_restore_rebar_state(dev);
1779 pci_restore_dpc_state(dev);
1780 pci_restore_ptm_state(dev);
1781
1782 pci_aer_clear_status(dev);
1783 pci_restore_aer_state(dev);
1784
1785 pci_restore_config_space(dev);
1786
1787 pci_restore_pcix_state(dev);
1788 pci_restore_msi_state(dev);
1789
	/* Restore ACS and IOV configuration state */
1791 pci_enable_acs(dev);
1792 pci_restore_iov_state(dev);
1793
1794 dev->state_saved = false;
1795 }
1796 EXPORT_SYMBOL(pci_restore_state);
1797
1798 struct pci_saved_state {
1799 u32 config_space[16];
1800 struct pci_cap_saved_data cap[];
1801 };
1802
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
1810 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1811 {
1812 struct pci_saved_state *state;
1813 struct pci_cap_saved_state *tmp;
1814 struct pci_cap_saved_data *cap;
1815 size_t size;
1816
1817 if (!dev->state_saved)
1818 return NULL;
1819
1820 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1821
1822 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1823 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1824
1825 state = kzalloc(size, GFP_KERNEL);
1826 if (!state)
1827 return NULL;
1828
1829 memcpy(state->config_space, dev->saved_config_space,
1830 sizeof(state->config_space));
1831
1832 cap = state->cap;
1833 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1834 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1835 memcpy(cap, &tmp->cap, len);
1836 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1837 }
1838
1839
1840 return state;
1841 }
1842 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1843
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
1849 int pci_load_saved_state(struct pci_dev *dev,
1850 struct pci_saved_state *state)
1851 {
1852 struct pci_cap_saved_data *cap;
1853
1854 dev->state_saved = false;
1855
1856 if (!state)
1857 return 0;
1858
1859 memcpy(dev->saved_config_space, state->config_space,
1860 sizeof(state->config_space));
1861
1862 cap = state->cap;
1863 while (cap->size) {
1864 struct pci_cap_saved_state *tmp;
1865
1866 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1867 if (!tmp || tmp->cap.size != cap->size)
1868 return -EINVAL;
1869
1870 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1871 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1872 sizeof(struct pci_cap_saved_data) + cap->size);
1873 }
1874
1875 dev->state_saved = true;
1876 return 0;
1877 }
1878 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1879
/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
1886 int pci_load_and_free_saved_state(struct pci_dev *dev,
1887 struct pci_saved_state **state)
1888 {
1889 int ret = pci_load_saved_state(dev, *state);
1890 kfree(*state);
1891 *state = NULL;
1892 return ret;
1893 }
1894 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
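
/*
 * Illustrative use (hypothetical): a driver can capture its own snapshot
 * of the saved state and reload it later, e.g. around a reset performed
 * long after pci_save_state().  A minimal sketch:
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... reset the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */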
1895
1896 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1897 {
1898 return pci_enable_resources(dev, bars);
1899 }
1900
1901 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1902 {
1903 int err;
1904 struct pci_dev *bridge;
1905 u16 cmd;
1906 u8 pin;
1907
1908 err = pci_set_power_state(dev, PCI_D0);
1909 if (err < 0 && err != -EIO)
1910 return err;
1911
1912 bridge = pci_upstream_bridge(dev);
1913 if (bridge)
1914 pcie_aspm_powersave_config_link(bridge);
1915
1916 err = pcibios_enable_device(dev, bars);
1917 if (err < 0)
1918 return err;
1919 pci_fixup_device(pci_fixup_enable, dev);
1920
1921 if (dev->msi_enabled || dev->msix_enabled)
1922 return 0;
1923
1924 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1925 if (pin) {
1926 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1927 if (cmd & PCI_COMMAND_INTX_DISABLE)
1928 pci_write_config_word(dev, PCI_COMMAND,
1929 cmd & ~PCI_COMMAND_INTX_DISABLE);
1930 }
1931
1932 return 0;
1933 }
1934
/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
1942 int pci_reenable_device(struct pci_dev *dev)
1943 {
1944 if (pci_is_enabled(dev))
1945 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1946 return 0;
1947 }
1948 EXPORT_SYMBOL(pci_reenable_device);
1949
1950 static void pci_enable_bridge(struct pci_dev *dev)
1951 {
1952 struct pci_dev *bridge;
1953 int retval;
1954
1955 bridge = pci_upstream_bridge(dev);
1956 if (bridge)
1957 pci_enable_bridge(bridge);
1958
1959 if (pci_is_enabled(dev)) {
1960 if (!dev->is_busmaster)
1961 pci_set_master(dev);
1962 return;
1963 }
1964
1965 retval = pci_enable_device(dev);
1966 if (retval)
1967 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1968 retval);
1969 pci_set_master(dev);
1970 }
1971
1972 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1973 {
1974 struct pci_dev *bridge;
1975 int err;
1976 int i, bars = 0;
1977
	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
1984 pci_update_current_state(dev, dev->current_state);
1985
1986 if (atomic_inc_return(&dev->enable_cnt) > 1)
1987 return 0;
1988
1989 bridge = pci_upstream_bridge(dev);
1990 if (bridge)
1991 pci_enable_bridge(bridge);
1992
	/* only skip sriov related */
1994 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1995 if (dev->resource[i].flags & flags)
1996 bars |= (1 << i);
1997 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1998 if (dev->resource[i].flags & flags)
1999 bars |= (1 << i);
2000
2001 err = do_pci_enable_device(dev, bars);
2002 if (err < 0)
2003 atomic_dec(&dev->enable_cnt);
2004 return err;
2005 }
2006
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
2015 int pci_enable_device_io(struct pci_dev *dev)
2016 {
2017 return pci_enable_device_flags(dev, IORESOURCE_IO);
2018 }
2019 EXPORT_SYMBOL(pci_enable_device_io);
2020
/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
2029 int pci_enable_device_mem(struct pci_dev *dev)
2030 {
2031 return pci_enable_device_flags(dev, IORESOURCE_MEM);
2032 }
2033 EXPORT_SYMBOL(pci_enable_device_mem);
2034
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
2046 int pci_enable_device(struct pci_dev *dev)
2047 {
2048 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2049 }
2050 EXPORT_SYMBOL(pci_enable_device);
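
/*
 * Illustrative use (hypothetical probe routine, abbreviated error
 * handling): enable the device and claim its regions before first use.
 * A minimal sketch; the "foo" region name is an assumption:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable;
 *	pci_set_master(pdev);
 */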
2051
/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
2058 struct pci_devres {
2059 unsigned int enabled:1;
2060 unsigned int pinned:1;
2061 unsigned int orig_intx:1;
2062 unsigned int restore_intx:1;
2063 unsigned int mwi:1;
2064 u32 region_mask;
2065 };
2066
2067 static void pcim_release(struct device *gendev, void *res)
2068 {
2069 struct pci_dev *dev = to_pci_dev(gendev);
2070 struct pci_devres *this = res;
2071 int i;
2072
2073 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2074 if (this->region_mask & (1 << i))
2075 pci_release_region(dev, i);
2076
2077 if (this->mwi)
2078 pci_clear_mwi(dev);
2079
2080 if (this->restore_intx)
2081 pci_intx(dev, this->orig_intx);
2082
2083 if (this->enabled && !this->pinned)
2084 pci_disable_device(dev);
2085 }
2086
2087 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2088 {
2089 struct pci_devres *dr, *new_dr;
2090
2091 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2092 if (dr)
2093 return dr;
2094
2095 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2096 if (!new_dr)
2097 return NULL;
2098 return devres_get(&pdev->dev, new_dr, NULL, NULL);
2099 }
2100
2101 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2102 {
2103 if (pci_is_managed(pdev))
2104 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2105 return NULL;
2106 }
2107
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
2114 int pcim_enable_device(struct pci_dev *pdev)
2115 {
2116 struct pci_devres *dr;
2117 int rc;
2118
2119 dr = get_pci_dr(pdev);
2120 if (unlikely(!dr))
2121 return -ENOMEM;
2122 if (dr->enabled)
2123 return 0;
2124
2125 rc = pci_enable_device(pdev);
2126 if (!rc) {
2127 pdev->is_managed = 1;
2128 dr->enabled = 1;
2129 }
2130 return rc;
2131 }
2132 EXPORT_SYMBOL(pcim_enable_device);
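
/*
 * Illustrative use (hypothetical): with the managed variant the matching
 * pci_disable_device() happens automatically on driver detach, so probe
 * error paths become simpler.  A minimal sketch:
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	... no explicit disable needed on later failure paths ...
 */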
2133
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
2142 void pcim_pin_device(struct pci_dev *pdev)
2143 {
2144 struct pci_devres *dr;
2145
2146 dr = find_pci_dr(pdev);
2147 WARN_ON(!dr || !dr->enabled);
2148 if (dr)
2149 dr->pinned = 1;
2150 }
2151 EXPORT_SYMBOL(pcim_pin_device);
2152
/**
 * pcibios_device_add - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
2161 int __weak pcibios_device_add(struct pci_dev *dev)
2162 {
2163 return 0;
2164 }
2165
/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
2175 void __weak pcibios_release_device(struct pci_dev *dev) {}
2176
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This is the
 * default implementation. Architecture implementations can override this.
 */
2185 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2186
2187
/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
2196 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2197
2198 static void do_pci_disable_device(struct pci_dev *dev)
2199 {
2200 u16 pci_command;
2201
2202 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2203 if (pci_command & PCI_COMMAND_MASTER) {
2204 pci_command &= ~PCI_COMMAND_MASTER;
2205 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2206 }
2207
2208 pcibios_disable_device(dev);
2209 }
2210
/**
 * pci_disable_enabled_device - disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
2218 void pci_disable_enabled_device(struct pci_dev *dev)
2219 {
2220 if (pci_is_enabled(dev))
2221 do_pci_disable_device(dev);
2222 }
2223
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
2234 void pci_disable_device(struct pci_dev *dev)
2235 {
2236 struct pci_devres *dr;
2237
2238 dr = find_pci_dr(dev);
2239 if (dr)
2240 dr->enabled = 0;
2241
2242 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2243 "disabling already-disabled device");
2244
2245 if (atomic_dec_return(&dev->enable_cnt) != 0)
2246 return;
2247
2248 do_pci_disable_device(dev);
2249
2250 dev->is_busmaster = 0;
2251 }
2252 EXPORT_SYMBOL(pci_disable_device);
2253
2254
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
2262 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2263 enum pcie_reset_state state)
2264 {
2265 return -EINVAL;
2266 }
2267
2268
/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
2275 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2276 {
2277 return pcibios_set_pcie_reset_state(dev, state);
2278 }
2279 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2280
2281 #ifdef CONFIG_PCIEAER
2282 void pcie_clear_device_status(struct pci_dev *dev)
2283 {
2284 u16 sta;
2285
2286 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2287 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2288 }
2289 #endif
2290
2291
/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
2295 void pcie_clear_root_pme_status(struct pci_dev *dev)
2296 {
2297 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2298 }
2299
2300
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
2308 bool pci_check_pme_status(struct pci_dev *dev)
2309 {
2310 int pmcsr_pos;
2311 u16 pmcsr;
2312 bool ret = false;
2313
2314 if (!dev->pm_cap)
2315 return false;
2316
2317 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2318 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2319 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2320 return false;
2321
	/* Clear PME status. */
2323 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2324 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
2326 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2327 ret = true;
2328 }
2329
2330 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2331
2332 return ret;
2333 }
2334
2335
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
2343 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2344 {
2345 if (pme_poll_reset && dev->pme_poll)
2346 dev->pme_poll = false;
2347
2348 if (pci_check_pme_status(dev)) {
2349 pci_wakeup_event(dev);
2350 pm_request_resume(&dev->dev);
2351 }
2352 return 0;
2353 }
2354
2355
/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
2359 void pci_pme_wakeup_bus(struct pci_bus *bus)
2360 {
2361 if (bus)
2362 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2363 }
2364
2365
/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
2371 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2372 {
2373 if (!dev->pm_cap)
2374 return false;
2375
2376 return !!(dev->pme_support & (1 << state));
2377 }
2378 EXPORT_SYMBOL(pci_pme_capable);
2379
2380 static void pci_pme_list_scan(struct work_struct *work)
2381 {
2382 struct pci_pme_device *pme_dev, *n;
2383
2384 mutex_lock(&pci_pme_list_mutex);
2385 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2386 if (pme_dev->dev->pme_poll) {
2387 struct pci_dev *bridge;
2388
2389 bridge = pme_dev->dev->bus->self;

			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
2395 if (bridge && bridge->current_state != PCI_D0)
2396 continue;
2397
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
2401 if (pme_dev->dev->current_state == PCI_D3cold)
2402 continue;
2403
2404 pci_pme_wakeup(pme_dev->dev, NULL);
2405 } else {
2406 list_del(&pme_dev->list);
2407 kfree(pme_dev);
2408 }
2409 }
2410 if (!list_empty(&pci_pme_list))
2411 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2412 msecs_to_jiffies(PME_TIMEOUT));
2413 mutex_unlock(&pci_pme_list_mutex);
2414 }
2415
2416 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2417 {
2418 u16 pmcsr;
2419
2420 if (!dev->pme_support)
2421 return;
2422
2423 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* Clear PME_Status by writing 1 to it and enable PME# */
2425 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2426 if (!enable)
2427 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2428
2429 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2430 }
2431
/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
2436 void pci_pme_restore(struct pci_dev *dev)
2437 {
2438 u16 pmcsr;
2439
2440 if (!dev->pme_support)
2441 return;
2442
2443 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2444 if (dev->wakeup_prepared) {
2445 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2446 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2447 } else {
2448 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2449 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2450 }
2451 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2452 }
2453
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
2462 void pci_pme_active(struct pci_dev *dev, bool enable)
2463 {
2464 __pci_pme_active(dev, enable);
2465
	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
2486 if (dev->pme_poll) {
2487 struct pci_pme_device *pme_dev;
2488 if (enable) {
2489 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2490 GFP_KERNEL);
2491 if (!pme_dev) {
2492 pci_warn(dev, "can't enable PME#\n");
2493 return;
2494 }
2495 pme_dev->dev = dev;
2496 mutex_lock(&pci_pme_list_mutex);
2497 list_add(&pme_dev->list, &pci_pme_list);
2498 if (list_is_singular(&pci_pme_list))
2499 queue_delayed_work(system_freezable_wq,
2500 &pci_pme_work,
2501 msecs_to_jiffies(PME_TIMEOUT));
2502 mutex_unlock(&pci_pme_list_mutex);
2503 } else {
2504 mutex_lock(&pci_pme_list_mutex);
2505 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2506 if (pme_dev->dev == dev) {
2507 list_del(&pme_dev->list);
2508 kfree(pme_dev);
2509 break;
2510 }
2511 }
2512 mutex_unlock(&pci_pme_list_mutex);
2513 }
2514 }
2515
2516 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2517 }
2518 EXPORT_SYMBOL(pci_pme_active);
2519
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
2539 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2540 {
2541 int ret = 0;
2542
	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
2550 if (!pci_power_manageable(dev))
2551 return 0;
2552
	/* Don't do the same thing twice in a row for one device. */
2554 if (!!enable == !!dev->wakeup_prepared)
2555 return 0;
2556
	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  From now on we can do both.
	 */
2563 if (enable) {
2564 int error;
2565
		/*
		 * Enable PME signaling if the device can signal PME from
		 * D3cold regardless of whether or not it can signal PME from
		 * the current target state, because that will allow it to
		 * signal PME when the hierarchy above it goes into D3cold and
		 * the device itself ends up in D3cold as a result of that.
		 */
2573 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2574 pci_pme_active(dev, true);
2575 else
2576 ret = 1;
2577 error = platform_pci_set_wakeup(dev, true);
2578 if (ret)
2579 ret = error;
2580 if (!ret)
2581 dev->wakeup_prepared = true;
2582 } else {
2583 platform_pci_set_wakeup(dev, false);
2584 pci_pme_active(dev, false);
2585 dev->wakeup_prepared = false;
2586 }
2587
2588 return ret;
2589 }
2590
2591
/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
2600 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2601 {
2602 if (enable && !device_may_wakeup(&pci_dev->dev))
2603 return -EINVAL;
2604
2605 return __pci_enable_wake(pci_dev, state, enable);
2606 }
2607 EXPORT_SYMBOL(pci_enable_wake);
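
/*
 * Illustrative use (hypothetical suspend callback): arm wakeup before
 * entering the target low-power state.  A minimal sketch:
 *
 *	if (device_may_wakeup(&pdev->dev))
 *		pci_enable_wake(pdev, PCI_D3hot, true);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */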
2608
2609
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
2623 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2624 {
2625 return pci_pme_capable(dev, PCI_D3cold) ?
2626 pci_enable_wake(dev, PCI_D3cold, enable) :
2627 pci_enable_wake(dev, PCI_D3hot, enable);
2628 }
2629 EXPORT_SYMBOL(pci_wake_from_d3);
2630
2631
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
2640 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2641 {
2642 if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
2646 pci_power_t state = platform_pci_choose_state(dev);
2647
2648 switch (state) {
2649 case PCI_POWER_ERROR:
2650 case PCI_UNKNOWN:
2651 return PCI_D3hot;
2652
2653 case PCI_D1:
2654 case PCI_D2:
2655 if (pci_no_d1d2(dev))
2656 return PCI_D3hot;
2657 }
2658
2659 return state;
2660 }
2661
	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform: do nothing.
	 */
2667 if (dev->current_state == PCI_D3cold)
2668 return PCI_D3cold;
2669 else if (!dev->pm_cap)
2670 return PCI_D0;
2671
2672 if (wakeup && dev->pme_support) {
2673 pci_power_t state = PCI_D3hot;
2674
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
2679 while (state && !(dev->pme_support & (1 << state)))
2680 state--;
2681
2682 if (state)
2683 return state;
2684 else if (dev->pme_support & 1)
2685 return PCI_D0;
2686 }
2687
2688 return PCI_D3hot;
2689 }
2690
2691
/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
2700 int pci_prepare_to_sleep(struct pci_dev *dev)
2701 {
2702 bool wakeup = device_may_wakeup(&dev->dev);
2703 pci_power_t target_state = pci_target_state(dev, wakeup);
2704 int error;
2705
2706 if (target_state == PCI_POWER_ERROR)
2707 return -EIO;
2708
	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
2716 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2717 pci_disable_ptm(dev);
2718
2719 pci_enable_wake(dev, target_state, wakeup);
2720
2721 error = pci_set_power_state(dev, target_state);
2722
2723 if (error) {
2724 pci_enable_wake(dev, target_state, false);
2725 pci_restore_ptm_state(dev);
2726 }
2727
2728 return error;
2729 }
2730 EXPORT_SYMBOL(pci_prepare_to_sleep);
2731
2732
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
2739 int pci_back_from_sleep(struct pci_dev *dev)
2740 {
2741 int ret = pci_set_power_state(dev, PCI_D0);
2742
2743 if (ret)
2744 return ret;
2745
2746 pci_enable_wake(dev, PCI_D0, false);
2747 return 0;
2748 }
2749 EXPORT_SYMBOL(pci_back_from_sleep);
2750
2751
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
2758 int pci_finish_runtime_suspend(struct pci_dev *dev)
2759 {
2760 pci_power_t target_state;
2761 int error;
2762
2763 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2764 if (target_state == PCI_POWER_ERROR)
2765 return -EIO;
2766
	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
2774 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2775 pci_disable_ptm(dev);
2776
2777 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2778
2779 error = pci_set_power_state(dev, target_state);
2780
2781 if (error) {
2782 pci_enable_wake(dev, target_state, false);
2783 pci_restore_ptm_state(dev);
2784 }
2785
2786 return error;
2787 }
2788
2789
/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
2797 bool pci_dev_run_wake(struct pci_dev *dev)
2798 {
2799 struct pci_bus *bus = dev->bus;
2800
2801 if (!dev->pme_support)
2802 return false;
2803
	/* PME-capable in principle, but not from the target power state */
2805 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2806 return false;
2807
2808 if (device_can_wakeup(&dev->dev))
2809 return true;
2810
2811 while (bus->parent) {
2812 struct pci_dev *bridge = bus->self;
2813
2814 if (device_can_wakeup(&bridge->dev))
2815 return true;
2816
2817 bus = bus->parent;
2818 }
2819
	/* We have reached the root bus. */
2821 if (bus->bridge)
2822 return device_can_wakeup(bus->bridge);
2823
2824 return false;
2825 }
2826 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2827
2828
/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended, if the platform
 * requires it to be resumed, or if its target low-power state does not match
 * its current configuration.
 */
2837 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2838 {
2839 struct device *dev = &pci_dev->dev;
2840 pci_power_t target_state;
2841
2842 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2843 return true;
2844
2845 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2846
	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
2852 return target_state != pci_dev->current_state &&
2853 target_state != PCI_D3cold &&
2854 pci_dev->current_state != PCI_D3hot;
2855 }
2856
2857
/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and it is not configured for system
 * wakeup, disable PME for it (to prevent spurious wakeup signaling) as long
 * as its current power state still allows its PM registers to be accessed.
 */
2868 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2869 {
2870 struct device *dev = &pci_dev->dev;
2871
2872 spin_lock_irq(&dev->power.lock);
2873
2874 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2875 pci_dev->current_state < PCI_D3cold)
2876 __pci_pme_active(pci_dev, false);
2877
2878 spin_unlock_irq(&dev->power.lock);
2879 }
2880
2881
/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * long as its current power state allows it to generate PME signals.
 */
2889 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2890 {
2891 struct device *dev = &pci_dev->dev;
2892
2893 if (!pci_dev_run_wake(pci_dev))
2894 return;
2895
2896 spin_lock_irq(&dev->power.lock);
2897
2898 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2899 __pci_pme_active(pci_dev, true);
2900
2901 spin_unlock_irq(&dev->power.lock);
2902 }
2903
2904 /**
2905 * pci_choose_state - Choose the power state of a PCI device.
2906 * @dev: Target PCI device.
2907 * @state: Target state for the whole system.
2908 *
2909 * Returns PCI power state suitable for @dev and @state.
2910 */
2911 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2912 {
2913 if (state.event == PM_EVENT_ON)
2914 return PCI_D0;
2915
2916 return pci_target_state(dev, false);
2917 }
2918 EXPORT_SYMBOL(pci_choose_state);
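/*
 * Illustrative use (not part of the PCI core): a legacy .suspend hook would
 * typically end with
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */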
2919
2920 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2921 {
2922 struct device *dev = &pdev->dev;
2923 struct device *parent = dev->parent;
2924
2925 if (parent)
2926 pm_runtime_get_sync(parent);
2927 pm_runtime_get_noresume(dev);
2928 /*
2929 * pdev->current_state is set to PCI_D3cold during suspending,
2930 * so wait until suspending completes
2931 */
2932 pm_runtime_barrier(dev);
2933 /*
2934 * Only need to resume devices in D3cold, because config
2935 * registers are still accessible for devices suspended but
2936 * not in D3cold.
2937 */
2938 if (pdev->current_state == PCI_D3cold)
2939 pm_runtime_resume(dev);
2940 }
2941
2942 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2943 {
2944 struct device *dev = &pdev->dev;
2945 struct device *parent = dev->parent;
2946
2947 pm_runtime_put(dev);
2948 if (parent)
2949 pm_runtime_put_sync(parent);
2950 }
2951
2952 static const struct dmi_system_id bridge_d3_blacklist[] = {
2953 #ifdef CONFIG_X86
2954 {
2955 /*
2956 * Gigabyte X299 root port is not marked as hotplug capable
2957 * which allows Linux to power manage it.  However, this
2958 * confuses the BIOS SMI handler so don't power manage root
2959 * ports on that system.
2960 */
2961 .ident = "X299 DESIGNARE EX-CF",
2962 .matches = {
2963 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2964 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2965 },
2966 },
2967 {
2968 /*
2969 * Downstream device is not accessible after putting a root port
2970 * into D3cold and back into D0 on Elo i2.
2971 */
2972 .ident = "Elo i2",
2973 .matches = {
2974 DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
2975 DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
2976 DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
2977 },
2978 },
2979 #endif
2980 { }
2981 };
2982
2983 /**
2984 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2985 * @bridge: Bridge to check
2986 *
2987 * This function checks if it is possible to move the bridge to D3.
2988 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2989 */
2990 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2991 {
2992 if (!pci_is_pcie(bridge))
2993 return false;
2994
2995 switch (pci_pcie_type(bridge)) {
2996 case PCI_EXP_TYPE_ROOT_PORT:
2997 case PCI_EXP_TYPE_UPSTREAM:
2998 case PCI_EXP_TYPE_DOWNSTREAM:
2999 if (pci_bridge_d3_disable)
3000 return false;
3001
3002 /*
3003 * Hotplug ports handled by firmware in System Management Mode
3004 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3005 */
3006 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3007 return false;
3008
3009 if (pci_bridge_d3_force)
3010 return true;
3011
3012 /* Even the oldest 2010 Thunderbolt controllers support D3. */
3013 if (bridge->is_thunderbolt)
3014 return true;
3015
3016 /* Platform might know better if the bridge supports D3 */
3017 if (platform_pci_bridge_d3(bridge))
3018 return true;
3019
3020 /*
3021 * Hotplug ports handled natively by the OS were not validated
3022 * by vendors for runtime D3 at least until 2018 because there
3023 * was no OS support.
3024 */
3025 if (bridge->is_hotplug_bridge)
3026 return false;
3027
3028 if (dmi_check_system(bridge_d3_blacklist))
3029 return false;
3030
3031 /*
3032 * It should be safe to put PCIe ports from 2015 or newer
3033 * to D3.
3034 */
3035 if (dmi_get_bios_year() >= 2015)
3036 return true;
3037 break;
3038 }
3039
3040 return false;
3041 }
3042
3043 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3044 {
3045 bool *d3cold_ok = data;
3046
3047 if (/* The device needs to be allowed to go D3cold ... */
3048 dev->no_d3cold || !dev->d3cold_allowed ||
3049
3050 /* ... and if it is wakeup capable to do so from D3cold. */
3051 (device_may_wakeup(&dev->dev) &&
3052 !pci_pme_capable(dev, PCI_D3cold)) ||
3053
3054 /* If it is a bridge it must be allowed to go to D3. */
3055 !pci_power_manageable(dev))
3056
3057 *d3cold_ok = false;
3058
3059 return !*d3cold_ok;
3060 }
3061
3062 /**
3063 * pci_bridge_d3_update - Update bridge D3 capabilities
3064 * @dev: PCI device which is changed
3065 *
3066 * Update upstream bridge PM capabilities accordingly depending on if the
3067 * device PM configuration was changed or the device is being removed.  The
3068 * change is also propagated upstream.
3069 */
3070 void pci_bridge_d3_update(struct pci_dev *dev)
3071 {
3072 bool remove = !device_is_registered(&dev->dev);
3073 struct pci_dev *bridge;
3074 bool d3cold_ok = true;
3075
3076 bridge = pci_upstream_bridge(dev);
3077 if (!bridge || !pci_bridge_d3_possible(bridge))
3078 return;
3079
3080 /*
3081 * If D3 is currently allowed for the bridge, removing one of its
3082 * consumers won't change it.
3083 */
3084 if (remove && bridge->bridge_d3)
3085 return;
3086
3087
3088
3089
3090
3091 /*
3092 * If the device is not being removed, check whether its own PM
3093 * configuration alone prevents the upstream bridge from going to D3.
3094 */
3095 if (!remove)
3096 pci_dev_check_d3cold(dev, &d3cold_ok);
3097
3098
3099 /*
3100 * If the bridge currently does not allow D3 and no check above
3101 * vetoed it, re-evaluate all other devices below the bridge, any
3102 * of which may still rule D3cold out.
3103 */
3104 if (d3cold_ok && !bridge->bridge_d3)
3105 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3106 &d3cold_ok);
3107
3108 if (bridge->bridge_d3 != d3cold_ok) {
3109 bridge->bridge_d3 = d3cold_ok;
3110 /* Propagate change to upstream bridges */
3111 pci_bridge_d3_update(bridge);
3112 }
3113 }
3114
3115 /**
3116 * pci_d3cold_enable - Enable D3cold for device
3117 * @dev: PCI device to handle
3118 *
3119 * This function can be used in drivers to enable D3cold from the device
3120 * they handle.  It also updates upstream PCI bridge PM capabilities
3121 * accordingly.
3122 */
3123 void pci_d3cold_enable(struct pci_dev *dev)
3124 {
3125 if (dev->no_d3cold) {
3126 dev->no_d3cold = false;
3127 pci_bridge_d3_update(dev);
3128 }
3129 }
3130 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3131
3132 /**
3133 * pci_d3cold_disable - Disable D3cold for device
3134 * @dev: PCI device to handle
3135 *
3136 * This function can be used in drivers to disable D3cold from the device
3137 * they handle.  It also updates upstream PCI bridge PM capabilities
3138 * accordingly.
3139 */
3140 void pci_d3cold_disable(struct pci_dev *dev)
3141 {
3142 if (!dev->no_d3cold) {
3143 dev->no_d3cold = true;
3144 pci_bridge_d3_update(dev);
3145 }
3146 }
3147 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
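/*
 * Illustrative sketch (not part of the PCI core): a driver whose hardware
 * loses state it cannot restore across D3cold can veto that state from
 * probe().  The function name is hypothetical.
 */
static void __maybe_unused example_veto_d3cold(struct pci_dev *pdev)
{
	/* Runtime PM will now stop at D3hot (or shallower) for this device. */
	pci_d3cold_disable(pdev);
}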
3148
3149 /**
3150 * pci_pm_init - Initialize PM functions of given PCI device
3151 * @dev: PCI device to handle.
3152 */
3153 void pci_pm_init(struct pci_dev *dev)
3154 {
3155 int pm;
3156 u16 status;
3157 u16 pmc;
3158
3159 pm_runtime_forbid(&dev->dev);
3160 pm_runtime_set_active(&dev->dev);
3161 pm_runtime_enable(&dev->dev);
3162 device_enable_async_suspend(&dev->dev);
3163 dev->wakeup_prepared = false;
3164
3165 dev->pm_cap = 0;
3166 dev->pme_support = 0;
3167
3168 /* find PCI PM capability in list */
3169 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3170 if (!pm)
3171 return;
3172
3173 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3174
3175 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3176 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3177 pmc & PCI_PM_CAP_VER_MASK);
3178 return;
3179 }
3180
3181 dev->pm_cap = pm;
3182 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3183 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3184 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3185 dev->d3cold_allowed = true;
3186
3187 dev->d1_support = false;
3188 dev->d2_support = false;
3189 if (!pci_no_d1d2(dev)) {
3190 if (pmc & PCI_PM_CAP_D1)
3191 dev->d1_support = true;
3192 if (pmc & PCI_PM_CAP_D2)
3193 dev->d2_support = true;
3194
3195 if (dev->d1_support || dev->d2_support)
3196 pci_info(dev, "supports%s%s\n",
3197 dev->d1_support ? " D1" : "",
3198 dev->d2_support ? " D2" : "");
3199 }
3200
3201 pmc &= PCI_PM_CAP_PME_MASK;
3202 if (pmc) {
3203 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3204 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3205 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3206 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3207 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3208 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3209 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3210 dev->pme_poll = true;
3211 /*
3212 * Make device's PM flags reflect the wake-up capability, but
3213 * let the user space enable it to wake up the system as needed.
3214 */
3215 device_set_wakeup_capable(&dev->dev, true);
3216 /* Disable the PME# generation functionality */
3217 pci_pme_active(dev, false);
3218 }
3219
3220 pci_read_config_word(dev, PCI_STATUS, &status);
3221 if (status & PCI_STATUS_IMM_READY)
3222 dev->imm_ready = 1;
3223 }
3224
3225 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3226 {
3227 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3228
3229 switch (prop) {
3230 case PCI_EA_P_MEM:
3231 case PCI_EA_P_VF_MEM:
3232 flags |= IORESOURCE_MEM;
3233 break;
3234 case PCI_EA_P_MEM_PREFETCH:
3235 case PCI_EA_P_VF_MEM_PREFETCH:
3236 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3237 break;
3238 case PCI_EA_P_IO:
3239 flags |= IORESOURCE_IO;
3240 break;
3241 default:
3242 return 0;
3243 }
3244
3245 return flags;
3246 }
3247
3248 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3249 u8 prop)
3250 {
3251 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3252 return &dev->resource[bei];
3253 #ifdef CONFIG_PCI_IOV
3254 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3255 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3256 return &dev->resource[PCI_IOV_RESOURCES +
3257 bei - PCI_EA_BEI_VF_BAR0];
3258 #endif
3259 else if (bei == PCI_EA_BEI_ROM)
3260 return &dev->resource[PCI_ROM_RESOURCE];
3261 else
3262 return NULL;
3263 }
3264
3265
3266 static int pci_ea_read(struct pci_dev *dev, int offset)
3267 {
3268 struct resource *res;
3269 int ent_size, ent_offset = offset;
3270 resource_size_t start, end;
3271 unsigned long flags;
3272 u32 dw0, bei, base, max_offset;
3273 u8 prop;
3274 bool support_64 = (sizeof(resource_size_t) >= 8);
3275
3276 pci_read_config_dword(dev, ent_offset, &dw0);
3277 ent_offset += 4;
3278
3279 /* Entry size field indicates DWORDs after 1st */
3280 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3281
3282 if (!(dw0 & PCI_EA_ENABLE))
3283 goto out;
3284
3285 bei = (dw0 & PCI_EA_BEI) >> 4;
3286 prop = (dw0 & PCI_EA_PP) >> 8;
3287
3288 /*
3289 * If the Property is in the reserved range, try the Secondary
3290 * Property instead.
3291 */
3292 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3293 prop = (dw0 & PCI_EA_SP) >> 16;
3294 if (prop > PCI_EA_P_BRIDGE_IO)
3295 goto out;
3296
3297 res = pci_ea_get_resource(dev, bei, prop);
3298 if (!res) {
3299 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3300 goto out;
3301 }
3302
3303 flags = pci_ea_flags(dev, prop);
3304 if (!flags) {
3305 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3306 goto out;
3307 }
3308
3309 /* Read Base */
3310 pci_read_config_dword(dev, ent_offset, &base);
3311 start = (base & PCI_EA_FIELD_MASK);
3312 ent_offset += 4;
3313
3314 /* Read MaxOffset */
3315 pci_read_config_dword(dev, ent_offset, &max_offset);
3316 ent_offset += 4;
3317
3318 /* Read Base MSBs (if 64-bit entry) */
3319 if (base & PCI_EA_IS_64) {
3320 u32 base_upper;
3321
3322 pci_read_config_dword(dev, ent_offset, &base_upper);
3323 ent_offset += 4;
3324
3325 flags |= IORESOURCE_MEM_64;
3326
3327 /* entry starts above 32-bit boundary, can't use */
3328 if (!support_64 && base_upper)
3329 goto out;
3330
3331 if (support_64)
3332 start |= ((u64)base_upper << 32);
3333 }
3334
3335 end = start + (max_offset | 0x03);
3336
3337 /* Read MaxOffset MSBs (if 64-bit entry) */
3338 if (max_offset & PCI_EA_IS_64) {
3339 u32 max_offset_upper;
3340
3341 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3342 ent_offset += 4;
3343
3344 flags |= IORESOURCE_MEM_64;
3345
3346 /* entry too big, can't use */
3347 if (!support_64 && max_offset_upper)
3348 goto out;
3349
3350 if (support_64)
3351 end += ((u64)max_offset_upper << 32);
3352 }
3353
3354 if (end < start) {
3355 pci_err(dev, "EA Entry crosses address boundary\n");
3356 goto out;
3357 }
3358
3359 if (ent_size != ent_offset - offset) {
3360 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3361 ent_size, ent_offset - offset);
3362 goto out;
3363 }
3364
3365 res->name = pci_name(dev);
3366 res->start = start;
3367 res->end = end;
3368 res->flags = flags;
3369
3370 if (bei <= PCI_EA_BEI_BAR5)
3371 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3372 bei, res, prop);
3373 else if (bei == PCI_EA_BEI_ROM)
3374 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3375 res, prop);
3376 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3377 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3378 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3379 else
3380 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3381 bei, res, prop);
3382
3383 out:
3384 return offset + ent_size;
3385 }
3386
3387 /* Enhanced Allocation Initialization */
3388 void pci_ea_init(struct pci_dev *dev)
3389 {
3390 int ea;
3391 u8 num_ent;
3392 int offset;
3393 int i;
3394
3395 /* find PCI EA capability in list */
3396 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3397 if (!ea)
3398 return;
3399
3400 /* determine the number of entries */
3401 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3402 &num_ent);
3403 num_ent &= PCI_EA_NUM_ENT_MASK;
3404
3405 offset = ea + PCI_EA_FIRST_ENT;
3406
3407 /* Skip DWORD 2 for type 1 functions */
3408 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3409 offset += 4;
3410
3411 /* parse each EA entry */
3412 for (i = 0; i < num_ent; ++i)
3413 offset = pci_ea_read(dev, offset);
3414 }
3415
3416 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3417 struct pci_cap_saved_state *new_cap)
3418 {
3419 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3420 }
3421
3422 /**
3423 * _pci_add_cap_save_buffer - allocate buffer for saving given
3424 * capability registers
3425 * @dev: the PCI device
3426 * @cap: the capability to allocate the buffer for
3427 * @extended: Standard or Extended capability ID
3428 * @size: requested size of the buffer
3429 */
3430 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3431 bool extended, unsigned int size)
3432 {
3433 int pos;
3434 struct pci_cap_saved_state *save_state;
3435
3436 if (extended)
3437 pos = pci_find_ext_capability(dev, cap);
3438 else
3439 pos = pci_find_capability(dev, cap);
3440
3441 if (!pos)
3442 return 0;
3443
3444 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3445 if (!save_state)
3446 return -ENOMEM;
3447
3448 save_state->cap.cap_nr = cap;
3449 save_state->cap.cap_extended = extended;
3450 save_state->cap.size = size;
3451 pci_add_saved_cap(dev, save_state);
3452
3453 return 0;
3454 }
3455
3456 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3457 {
3458 return _pci_add_cap_save_buffer(dev, cap, false, size);
3459 }
3460
3461 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3462 {
3463 return _pci_add_cap_save_buffer(dev, cap, true, size);
3464 }
3465
3466 /**
3467 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3468 * @dev: the PCI device
3469 */
3470 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3471 {
3472 int error;
3473
3474 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3475 PCI_EXP_SAVE_REGS * sizeof(u16));
3476 if (error)
3477 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3478
3479 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3480 if (error)
3481 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3482
3483 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3484 2 * sizeof(u16));
3485 if (error)
3486 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3487
3488 pci_allocate_vc_save_buffers(dev);
3489 }
3490
3491 void pci_free_cap_save_buffers(struct pci_dev *dev)
3492 {
3493 struct pci_cap_saved_state *tmp;
3494 struct hlist_node *n;
3495
3496 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3497 kfree(tmp);
3498 }
3499
3500 /**
3501 * pci_configure_ari - enable or disable ARI forwarding
3502 * @dev: the PCI device
3503 *
3504 * If @dev and its upstream bridge both support ARI, enable ARI in the
3505 * bridge.  Otherwise, disable ARI in the bridge.
3506 */
3507 void pci_configure_ari(struct pci_dev *dev)
3508 {
3509 u32 cap;
3510 struct pci_dev *bridge;
3511
3512 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3513 return;
3514
3515 bridge = dev->bus->self;
3516 if (!bridge)
3517 return;
3518
3519 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3520 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3521 return;
3522
3523 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3524 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3525 PCI_EXP_DEVCTL2_ARI);
3526 bridge->ari_enabled = 1;
3527 } else {
3528 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3529 PCI_EXP_DEVCTL2_ARI);
3530 bridge->ari_enabled = 0;
3531 }
3532 }
3533
3534 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3535 {
3536 int pos;
3537 u16 cap, ctrl;
3538
3539 pos = pdev->acs_cap;
3540 if (!pos)
3541 return false;
3542
3543 /*
3544 * Except for egress control, capabilities are either required
3545 * or only required if controllable.  Features missing from the
3546 * capability field can therefore be assumed as hard-wired enabled.
3547 */
3548 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3549 acs_flags &= (cap | PCI_ACS_EC);
3550
3551 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3552 return (ctrl & acs_flags) == acs_flags;
3553 }
3554
3555 /**
3556 * pci_acs_enabled - test ACS against required flags for a given device
3557 * @pdev: device to test
3558 * @acs_flags: required PCI ACS flags
3559 *
3560 * Return true if the device supports the provided flags.  Automatically
3561 * filters out flags that are not implemented on multifunction devices.
3562 *
3563 * Note that this interface checks the effective ACS capabilities of the
3564 * device rather than the actual capabilities.  For instance, most single
3565 * function endpoints are not required to support ACS because they have no
3566 * opportunity for peer-to-peer access.  We therefore return 'true'
3567 * regardless of whether the device exposes an ACS capability.  This makes
3568 * it much easier for callers of this function to ignore the actual type
3569 * or topology of the device when testing ACS support.
3570 */
3571 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3572 {
3573 int ret;
3574
3575 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3576 if (ret >= 0)
3577 return ret > 0;
3578
3579 /*
3580 * Conventional PCI and PCI-X devices never support ACS, either
3581 * effectively or actually.  The shared bus topology implies that
3582 * any device on the bus can receive or snoop DMA.
3583 */
3584 if (!pci_is_pcie(pdev))
3585 return false;
3586
3587 switch (pci_pcie_type(pdev)) {
3588 /*
3589 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3590 * but since their primary interface is PCI/X, we conservatively
3591 * handle them as conventional devices.
3592 */
3593 case PCI_EXP_TYPE_PCIE_BRIDGE:
3594
3595 /*
3596 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3597 * applicable" to PCIe-to-PCI bridges and Root Complex Event
3598 * Collectors, so treat them as having no ACS at all.
3599 */
3600 case PCI_EXP_TYPE_PCI_BRIDGE:
3601 case PCI_EXP_TYPE_RC_EC:
3602 return false;
3603
3604 /*
3605 * PCIe 3.0, 6.12.1.1 specifies ACS capabilities that apply to
3606 * downstream ports, including root ports.
3607 */
3608 case PCI_EXP_TYPE_DOWNSTREAM:
3609 case PCI_EXP_TYPE_ROOT_PORT:
3610 return pci_acs_flags_enabled(pdev, acs_flags);
3611
3612
3613
3614 /*
3615 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that apply to
3616 * multifunction devices.
3617 */
3618 case PCI_EXP_TYPE_ENDPOINT:
3619 case PCI_EXP_TYPE_UPSTREAM:
3620 case PCI_EXP_TYPE_LEG_END:
3621 case PCI_EXP_TYPE_RC_END:
3622 if (!pdev->multifunction)
3623 break;
3624
3625 return pci_acs_flags_enabled(pdev, acs_flags);
3626 }
3627
3628 /*
3629 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3630 * to single function devices with the exception of downstream ports.
3631 */
3632 return true;
3633 }
3634
3635 /**
3636 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3637 * @start: starting downstream device
3638 * @end: ending upstream device or NULL to search to the root bus
3639 * @acs_flags: required flags
3640 *
3641 * Walk up a device tree from start to end testing PCI ACS support.  If
3642 * any step along the way does not support the required flags, return false.
3643 */
3644 bool pci_acs_path_enabled(struct pci_dev *start,
3645 struct pci_dev *end, u16 acs_flags)
3646 {
3647 struct pci_dev *pdev, *parent = start;
3648
3649 do {
3650 pdev = parent;
3651
3652 if (!pci_acs_enabled(pdev, acs_flags))
3653 return false;
3654
3655 if (pci_is_root_bus(pdev->bus))
3656 return (end == NULL);
3657
3658 parent = pdev->bus->self;
3659 } while (pdev != end);
3660
3661 return true;
3662 }
3663
3664 /**
3665 * pci_acs_init - Initialize ACS if hardware supports it
3666 * @dev: the PCI device
3667 */
3668 void pci_acs_init(struct pci_dev *dev)
3669 {
3670 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3671
3672
3673 /*
3674 * Attempt to enable ACS regardless of capability because some Root
3675 * Ports (e.g. those quirked with *_intel_pch_acs_*) have private
3676 * (non-standard) ACS controls that still need to be set up.
3677 */
3678 pci_enable_acs(dev);
3679 }
3680
3681 /**
3682 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3683 * @pdev: PCI device
3684 * @bar: BAR to find
3685 *
3686 * Helper to find the position of the ctrl register for a BAR.
3687 * Returns -ENOTSUPP if resizable BARs are not supported at all,
3688 * -ENOENT if no ctrl register for the BAR could be found.
3689 */
3690 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3691 {
3692 unsigned int pos, nbars, i;
3693 u32 ctrl;
3694
3695 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3696 if (!pos)
3697 return -ENOTSUPP;
3698
3699 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3700 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3701 PCI_REBAR_CTRL_NBAR_SHIFT;
3702
3703 for (i = 0; i < nbars; i++, pos += 8) {
3704 int bar_idx;
3705
3706 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3707 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3708 if (bar_idx == bar)
3709 return pos;
3710 }
3711
3712 return -ENOENT;
3713 }
3714
3715 /**
3716 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3717 * @pdev: PCI device
3718 * @bar: BAR to query
3719 *
3720 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3721 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3722 */
3723 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3724 {
3725 int pos;
3726 u32 cap;
3727
3728 pos = pci_rebar_find_pos(pdev, bar);
3729 if (pos < 0)
3730 return 0;
3731
3732 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3733 cap &= PCI_REBAR_CAP_SIZES;
3734
3735 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3736 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3737 bar == 0 && cap == 0x7000)
3738 cap = 0x3f000;
3739
3740 return cap >> 4;
3741 }
3742 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
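/*
 * Illustrative sketch (not part of the PCI core): decoding the bitmask
 * returned above.  Bit n set means a BAR size of (1 MB << n) is supported,
 * so a return value of 0x1c0 offers 64 MB, 128 MB and 256 MB.  The function
 * name is hypothetical.
 */
static u64 __maybe_unused example_rebar_max_bytes(struct pci_dev *pdev, int bar)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);

	if (!sizes)
		return 0;	/* BAR is not resizable */

	/* The highest set bit selects the largest supported size. */
	return 1ULL << (__fls(sizes) + 20);
}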
3743
3744 /**
3745 * pci_rebar_get_current_size - get the current size of a BAR
3746 * @pdev: PCI device
3747 * @bar: BAR to query
3748 *
3749 * Read the size of a BAR from the resizable BAR config.
3750 * Returns size if found or negative error code.
3751 */
3752 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3753 {
3754 int pos;
3755 u32 ctrl;
3756
3757 pos = pci_rebar_find_pos(pdev, bar);
3758 if (pos < 0)
3759 return pos;
3760
3761 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3762 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3763 }
3764
3765 /**
3766 * pci_rebar_set_size - set a new size for a BAR
3767 * @pdev: PCI device
3768 * @bar: BAR to set size to
3769 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3770 *
3771 * Set the new size of a BAR as defined in the spec.
3772 * Returns zero if resizing was successful, error code otherwise.
3773 */
3774 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3775 {
3776 int pos;
3777 u32 ctrl;
3778
3779 pos = pci_rebar_find_pos(pdev, bar);
3780 if (pos < 0)
3781 return pos;
3782
3783 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3784 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3785 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3786 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3787 return 0;
3788 }
3789
3790 /**
3791 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3792 * @dev: the PCI device
3793 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3794 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3795 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3796 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3797 *
3798 * Return 0 if all upstream bridges support AtomicOp routing, egress
3799 * blocking is disabled on all upstream ports, and the root port supports
3800 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3801 * AtomicOp completion), or negative otherwise.
3802 */
3803 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3804 {
3805 struct pci_bus *bus = dev->bus;
3806 struct pci_dev *bridge;
3807 u32 cap, ctl2;
3808
3809 /*
3810 * Per PCIe r4.0, sec 9.3.5.10, the AtomicOp Requester Enable bit in
3811 * Device Control 2 is RsvdP in VFs; the PF setting applies to all
3812 * associated VFs, so a VF cannot enable AtomicOps on its own.
3813 */
3814 if (dev->is_virtfn)
3815 return -EINVAL;
3816
3817 if (!pci_is_pcie(dev))
3818 return -EINVAL;
3819
3820
3821 /*
3822 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3823 * AtomicOp requesters.  For now, we only support endpoints as
3824 * requesters and root ports as completers.  No endpoints as
3825 * completers, and no peer-to-peer.
3826 */
3827 switch (pci_pcie_type(dev)) {
3828 case PCI_EXP_TYPE_ENDPOINT:
3829 case PCI_EXP_TYPE_LEG_END:
3830 case PCI_EXP_TYPE_RC_END:
3831 break;
3832 default:
3833 return -EINVAL;
3834 }
3835
3836 while (bus->parent) {
3837 bridge = bus->self;
3838
3839 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3840
3841 switch (pci_pcie_type(bridge)) {
3842 /* Ensure switch ports support AtomicOp routing */
3843 case PCI_EXP_TYPE_UPSTREAM:
3844 case PCI_EXP_TYPE_DOWNSTREAM:
3845 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3846 return -EINVAL;
3847 break;
3848
3849 /* Ensure root port supports all the sizes we care about */
3850 case PCI_EXP_TYPE_ROOT_PORT:
3851 if ((cap & cap_mask) != cap_mask)
3852 return -EINVAL;
3853 break;
3854 }
3855
3856 /* Ensure upstream ports don't block AtomicOps on egress */
3857 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3858 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3859 &ctl2);
3860 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3861 return -EINVAL;
3862 }
3863
3864 bus = bus->parent;
3865 }
3866
3867 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3868 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3869 return 0;
3870 }
3871 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
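/*
 * Illustrative sketch (not part of the PCI core): an endpoint driver asking
 * the core to validate the whole path before issuing 64-bit AtomicOps.  The
 * function name is hypothetical.
 */
static int __maybe_unused example_enable_atomics(struct pci_dev *pdev)
{
	/*
	 * Fails unless every intermediate switch routes AtomicOps and the
	 * Root Port advertises 64-bit AtomicOp completion.
	 */
	return pci_enable_atomic_ops_to_root(pdev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
}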
3872
3873
3874 /**
3875 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3876 * @dev: the PCI device
3877 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3878 *
3879 * Perform INTx swizzling for a device behind one level of bridge.  This is
3880 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3881 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3882 * number is always 0 because the whole device appears in slot 0.
3883 */
3884 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3885 {
3886 int slot;
3887
3888 if (pci_ari_enabled(dev->bus))
3889 slot = 0;
3890 else
3891 slot = PCI_SLOT(dev->devfn);
3892
3893 return (((pin - 1) + slot) % 4) + 1;
3894 }
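/*
 * Worked example for the swizzle above: a device in slot 3 using INTB
 * (pin = 2) appears one level up as (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA
 * on the bridge's primary side.
 */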
3895
3896 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3897 {
3898 u8 pin;
3899
3900 pin = dev->pin;
3901 if (!pin)
3902 return -1;
3903
3904 while (!pci_is_root_bus(dev->bus)) {
3905 pin = pci_swizzle_interrupt_pin(dev, pin);
3906 dev = dev->bus->self;
3907 }
3908 *bridge = dev;
3909 return pin;
3910 }
3911
3912 /**
3913 * pci_common_swizzle - swizzle INTx all the way to root bridge
3914 * @dev: the PCI device
3915 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3916 *
3917 * Perform INTx swizzling for a device.  This traverses through all
3918 * PCI-to-PCI bridges all the way up to a PCI root bus.
3919 */
3920 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3921 {
3922 u8 pin = *pinp;
3923
3924 while (!pci_is_root_bus(dev->bus)) {
3925 pin = pci_swizzle_interrupt_pin(dev, pin);
3926 dev = dev->bus->self;
3927 }
3928 *pinp = pin;
3929 return PCI_SLOT(dev->devfn);
3930 }
3931 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3932
3933 /**
3934 * pci_release_region - Release a PCI bar
3935 * @pdev: PCI device whose resources were previously reserved by
3936 * pci_request_region()
3937 * @bar: BAR to release
3938 *
3939 * Releases the PCI I/O and memory resources previously reserved by a
3940 * successful call to pci_request_region().  Call this function only
3941 * after all use of the PCI regions has ceased.
3942 */
3943 void pci_release_region(struct pci_dev *pdev, int bar)
3944 {
3945 struct pci_devres *dr;
3946
3947 if (pci_resource_len(pdev, bar) == 0)
3948 return;
3949 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3950 release_region(pci_resource_start(pdev, bar),
3951 pci_resource_len(pdev, bar));
3952 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3953 release_mem_region(pci_resource_start(pdev, bar),
3954 pci_resource_len(pdev, bar));
3955
3956 dr = find_pci_dr(pdev);
3957 if (dr)
3958 dr->region_mask &= ~(1 << bar);
3959 }
3960 EXPORT_SYMBOL(pci_release_region);
3961
3962 /**
3963 * __pci_request_region - Reserve PCI I/O and memory resource
3964 * @pdev: PCI device whose resources are to be reserved
3965 * @bar: BAR to be reserved
3966 * @res_name: Name to be associated with resource.
3967 * @exclusive: whether the region access is exclusive or not
3968 *
3969 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3970 * being reserved by owner @res_name.  Do not access any
3971 * address inside the PCI regions unless this call returns
3972 * successfully.
3973 *
3974 * If @exclusive is set, then the region is marked so that userspace
3975 * is explicitly not allowed to map the resource via /dev/mem or
3976 * sysfs MMIO access.
3977 *
3978 * Returns 0 on success, or %EBUSY on error.  A warning
3979 * message is also printed on failure.
3980 */
3981 static int __pci_request_region(struct pci_dev *pdev, int bar,
3982 const char *res_name, int exclusive)
3983 {
3984 struct pci_devres *dr;
3985
3986 if (pci_resource_len(pdev, bar) == 0)
3987 return 0;
3988
3989 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3990 if (!request_region(pci_resource_start(pdev, bar),
3991 pci_resource_len(pdev, bar), res_name))
3992 goto err_out;
3993 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3994 if (!__request_mem_region(pci_resource_start(pdev, bar),
3995 pci_resource_len(pdev, bar), res_name,
3996 exclusive))
3997 goto err_out;
3998 }
3999
4000 dr = find_pci_dr(pdev);
4001 if (dr)
4002 dr->region_mask |= 1 << bar;
4003
4004 return 0;
4005
4006 err_out:
4007 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4008 &pdev->resource[bar]);
4009 return -EBUSY;
4010 }
4011
4012 /**
4013 * pci_request_region - Reserve PCI I/O and memory resource
4014 * @pdev: PCI device whose resources are to be reserved
4015 * @bar: BAR to be reserved
4016 * @res_name: Name to be associated with resource
4017 *
4018 * Mark the PCI region associated with PCI device @pdev BAR @bar as
4019 * being reserved by owner @res_name.  Do not access any
4020 * address inside the PCI regions unless this call returns
4021 * successfully.
4022 *
4023 * Returns 0 on success, or %EBUSY on error.  A warning
4024 * message is also printed on failure.
4025 */
4026 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4027 {
4028 return __pci_request_region(pdev, bar, res_name, 0);
4029 }
4030 EXPORT_SYMBOL(pci_request_region);
4031
4032 /**
4033 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4034 * @pdev: PCI device whose resources were previously reserved
4035 * @bars: Bitmask of BARs to be released
4036 *
4037 * Release selected PCI I/O and memory resources previously reserved.
4038 * Call this function only after all use of the PCI regions has ceased.
4039 */
4040 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4041 {
4042 int i;
4043
4044 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4045 if (bars & (1 << i))
4046 pci_release_region(pdev, i);
4047 }
4048 EXPORT_SYMBOL(pci_release_selected_regions);
4049
4050 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4051 const char *res_name, int excl)
4052 {
4053 int i;
4054
4055 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4056 if (bars & (1 << i))
4057 if (__pci_request_region(pdev, i, res_name, excl))
4058 goto err_out;
4059 return 0;
4060
4061 err_out:
4062 while (--i >= 0)
4063 if (bars & (1 << i))
4064 pci_release_region(pdev, i);
4065
4066 return -EBUSY;
4067 }
4068
4069 /**
4070 * pci_request_selected_regions - Reserve selected PCI I/O and memory
4071 * resources
4072 * @pdev: PCI device whose resources are to be reserved
4073 * @bars: Bitmask of BARs to be requested
4074 * @res_name: Name to be associated with resource
4075 */
4076 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4077 const char *res_name)
4078 {
4079 return __pci_request_selected_regions(pdev, bars, res_name, 0);
4080 }
4081 EXPORT_SYMBOL(pci_request_selected_regions);
4082
4083 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4084 const char *res_name)
4085 {
4086 return __pci_request_selected_regions(pdev, bars, res_name,
4087 IORESOURCE_EXCLUSIVE);
4088 }
4089 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4090
4091
4092 /**
4093 * pci_release_regions - Release reserved PCI I/O and memory resources
4094 * @pdev: PCI device whose resources were previously reserved by
4095 * pci_request_regions()
4096 *
4097 * Releases all PCI I/O and memory resources previously reserved by a
4098 * successful call to pci_request_regions().  Call this function only
4099 * after all use of the PCI regions has ceased.
4100 */
4101 void pci_release_regions(struct pci_dev *pdev)
4102 {
4103 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4104 }
4105 EXPORT_SYMBOL(pci_release_regions);
4106
4107 /**
4108 * pci_request_regions - Reserve PCI I/O and memory resources
4109 * @pdev: PCI device whose resources are to be reserved
4110 * @res_name: Name to be associated with resource.
4111 *
4112 * Mark all PCI regions associated with PCI device @pdev as
4113 * being reserved by owner @res_name.  Do not access any
4114 * address inside the PCI regions unless this call returns
4115 * successfully.
4116 *
4117 * Returns 0 on success, or %EBUSY on error.  A warning
4118 * message is also printed on failure.
4119 */
4120 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4121 {
4122 return pci_request_selected_regions(pdev,
4123 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4124 }
4125 EXPORT_SYMBOL(pci_request_regions);
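/*
 * Illustrative sketch (not part of the PCI core): the canonical probe()
 * sequence built on the region helpers above.  Names prefixed "example_"
 * are hypothetical.
 */
static int __maybe_unused example_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Claim every standard BAR under the driver's name. */
	ret = pci_request_regions(pdev, "example");
	if (ret)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		ret = -ENOMEM;
		goto err_release;
	}

	pci_set_master(pdev);
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}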
4126
4127 /**
4128 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4129 * @pdev: PCI device whose resources are to be reserved
4130 * @res_name: Name to be associated with resource.
4131 *
4132 * Mark all PCI regions associated with PCI device @pdev as being reserved
4133 * by owner @res_name.  Do not access any address inside the PCI regions
4134 * unless this call returns successfully.
4135 *
4136 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4137 * and the sysfs MMIO access will not be allowed.
4138 *
4139 * Returns 0 on success, or %EBUSY on error.  A warning message is also
4140 * printed on failure.
4141 */
4142 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4143 {
4144 return pci_request_selected_regions_exclusive(pdev,
4145 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4146 }
4147 EXPORT_SYMBOL(pci_request_regions_exclusive);
4148
4149 /*
4150 * Record the PCI IO range (expressed as CPU physical address + size).
4151 * Return a negative value if an error has occurred, zero otherwise.
4152 */
4153 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4154 resource_size_t size)
4155 {
4156 int ret = 0;
4157 #ifdef PCI_IOBASE
4158 struct logic_pio_hwaddr *range;
4159
4160 if (!size || addr + size < addr)
4161 return -EINVAL;
4162
4163 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4164 if (!range)
4165 return -ENOMEM;
4166
4167 range->fwnode = fwnode;
4168 range->size = size;
4169 range->hw_start = addr;
4170 range->flags = LOGIC_PIO_CPU_MMIO;
4171
4172 ret = logic_pio_register_range(range);
4173 if (ret)
4174 kfree(range);
4175
4176 /* Ignore duplicates due to deferred probing */
4177 if (ret == -EEXIST)
4178 ret = 0;
4179 #endif
4180
4181 return ret;
4182 }
4183
4184 phys_addr_t pci_pio_to_address(unsigned long pio)
4185 {
4186 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4187
4188 #ifdef PCI_IOBASE
4189 if (pio >= MMIO_UPPER_LIMIT)
4190 return address;
4191
4192 address = logic_pio_to_hwaddr(pio);
4193 #endif
4194
4195 return address;
4196 }
4197 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4198
4199 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4200 {
4201 #ifdef PCI_IOBASE
4202 return logic_pio_trans_cpuaddr(address);
4203 #else
4204 if (address > IO_SPACE_LIMIT)
4205 return (unsigned long)-1;
4206
4207 return (unsigned long) address;
4208 #endif
4209 }
4210
4211 /**
4212 * pci_remap_iospace - Remap the memory mapped I/O space
4213 * @res: Resource describing the I/O space
4214 * @phys_addr: physical address of range to be mapped
4215 *
4216 * Remap the memory mapped I/O space described by the @res and the CPU
4217 * physical address @phys_addr into virtual address space.  Only
4218 * architectures that have memory mapped IO functions defined (and the
4219 * PCI_IOBASE value defined) should call this function.
4220 */
4221 #ifndef pci_remap_iospace
4222 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4223 {
4224 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4225 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4226
4227 if (!(res->flags & IORESOURCE_IO))
4228 return -EINVAL;
4229
4230 if (res->end > IO_SPACE_LIMIT)
4231 return -EINVAL;
4232
4233 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4234 pgprot_device(PAGE_KERNEL));
4235 #else
4236 /*
4237 * This architecture does not have memory mapped I/O space,
4238 * so this function should never be called.
4239 */
4240 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4241 return -ENODEV;
4242 #endif
4243 }
4244 EXPORT_SYMBOL(pci_remap_iospace);
4245 #endif
4246
4247 /**
4248 * pci_unmap_iospace - Unmap the memory mapped I/O space
4249 * @res: resource to be unmapped
4250 *
4251 * Unmap the CPU virtual address @res from virtual address space.  Only
4252 * architectures that have memory mapped IO functions defined (and the
4253 * PCI_IOBASE value defined) should call this function.
4254 */
4255 void pci_unmap_iospace(struct resource *res)
4256 {
4257 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4258 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4259
4260 vunmap_range(vaddr, vaddr + resource_size(res));
4261 #endif
4262 }
4263 EXPORT_SYMBOL(pci_unmap_iospace);
4264
4265 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4266 {
4267 struct resource **res = ptr;
4268
4269 pci_unmap_iospace(*res);
4270 }
4271
4272 /**
4273 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4274 * @dev: Generic device to remap IO address for
4275 * @res: Resource describing the I/O space
4276 * @phys_addr: physical address of range to be mapped
4277 *
4278 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4279 * detach.
4280 */
4281 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4282 phys_addr_t phys_addr)
4283 {
4284 const struct resource **ptr;
4285 int error;
4286
4287 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4288 if (!ptr)
4289 return -ENOMEM;
4290
4291 error = pci_remap_iospace(res, phys_addr);
4292 if (error) {
4293 devres_free(ptr);
4294 } else {
4295 *ptr = res;
4296 devres_add(dev, ptr);
4297 }
4298
4299 return error;
4300 }
4301 EXPORT_SYMBOL(devm_pci_remap_iospace);
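/*
 * Illustrative sketch (not part of the PCI core): a host-bridge driver
 * mapping its I/O window with the managed helper above; the mapping is torn
 * down automatically on driver detach.  The function name is hypothetical.
 */
static int __maybe_unused example_map_io_window(struct device *dev,
						struct resource *io_res,
						phys_addr_t cpu_addr)
{
	return devm_pci_remap_iospace(dev, io_res, cpu_addr);
}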
4302
4303 /**
4304 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4305 * @dev: Generic device to remap IO address for
4306 * @offset: Resource address to map
4307 * @size: Size of map
4308 *
4309 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4310 * detach.
4311 */
4312 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4313 resource_size_t offset,
4314 resource_size_t size)
4315 {
4316 void __iomem **ptr, *addr;
4317
4318 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4319 if (!ptr)
4320 return NULL;
4321
4322 addr = pci_remap_cfgspace(offset, size);
4323 if (addr) {
4324 *ptr = addr;
4325 devres_add(dev, ptr);
4326 } else
4327 devres_free(ptr);
4328
4329 return addr;
4330 }
4331 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4332
4333 /**
4334 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4335 * @dev: generic device to handle the resource for
4336 * @res: configuration space resource to be handled
4337 *
4338 * Checks that a resource is a valid memory region, requests the memory
4339 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4340 * proper PCI configuration space memory attributes are guaranteed.
4341 *
4342 * All operations are managed and will be undone on driver detach.
4343 *
4344 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
4345 * code on failure. Usage example::
4346 *
4347 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4348 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4349 *	if (IS_ERR(base))
4350 *		return PTR_ERR(base);
4351 */
4352 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4353 struct resource *res)
4354 {
4355 resource_size_t size;
4356 const char *name;
4357 void __iomem *dest_ptr;
4358
4359 BUG_ON(!dev);
4360
4361 if (!res || resource_type(res) != IORESOURCE_MEM) {
4362 dev_err(dev, "invalid resource\n");
4363 return IOMEM_ERR_PTR(-EINVAL);
4364 }
4365
4366 size = resource_size(res);
4367
4368 if (res->name)
4369 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4370 res->name);
4371 else
4372 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4373 if (!name)
4374 return IOMEM_ERR_PTR(-ENOMEM);
4375
4376 if (!devm_request_mem_region(dev, res->start, size, name)) {
4377 dev_err(dev, "can't request region for resource %pR\n", res);
4378 return IOMEM_ERR_PTR(-EBUSY);
4379 }
4380
4381 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4382 if (!dest_ptr) {
4383 dev_err(dev, "ioremap failed for resource %pR\n", res);
4384 devm_release_mem_region(dev, res->start, size);
4385 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4386 }
4387
4388 return dest_ptr;
4389 }
4390 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4391
4392 static void __pci_set_master(struct pci_dev *dev, bool enable)
4393 {
4394 u16 old_cmd, cmd;
4395
4396 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4397 if (enable)
4398 cmd = old_cmd | PCI_COMMAND_MASTER;
4399 else
4400 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4401 if (cmd != old_cmd) {
4402 pci_dbg(dev, "%s bus mastering\n",
4403 enable ? "enabling" : "disabling");
4404 pci_write_config_word(dev, PCI_COMMAND, cmd);
4405 }
4406 dev->is_busmaster = enable;
4407 }
4408
4409 /**
4410 * pcibios_setup - process "pci=" kernel boot arguments
4411 * @str: string used to pass in "pci=" kernel boot arguments
4412 *
4413 * Process kernel boot arguments.  This is the default implementation.
4414 * Architecture specific implementations can override this as necessary.
4415 */
4416 char * __weak __init pcibios_setup(char *str)
4417 {
4418 return str;
4419 }
4420
4421 /**
4422 * pcibios_set_master - enable PCI bus-mastering for device dev
4423 * @dev: the PCI device to enable
4424 *
4425 * Enables PCI bus-mastering for the device.  This is the default
4426 * implementation.  Architecture specific implementations can override
4427 * this if necessary.
4428 */
4429 void __weak pcibios_set_master(struct pci_dev *dev)
4430 {
4431 u8 lat;
4432
4433 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4434 if (pci_is_pcie(dev))
4435 return;
4436
4437 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4438 if (lat < 16)
4439 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4440 else if (lat > pcibios_max_latency)
4441 lat = pcibios_max_latency;
4442 else
4443 return;
4444
4445 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4446 }
4447
4448 /**
4449 * pci_set_master - enables bus-mastering for device dev
4450 * @dev: the PCI device to enable
4451 *
4452 * Enables bus-mastering on the device and calls pcibios_set_master()
4453 * to do the needed arch specific settings.
4454 */
4455 void pci_set_master(struct pci_dev *dev)
4456 {
4457 __pci_set_master(dev, true);
4458 pcibios_set_master(dev);
4459 }
4460 EXPORT_SYMBOL(pci_set_master);
4461
4462 /**
4463 * pci_clear_master - disables bus-mastering for device dev
4464 * @dev: the PCI device to disable
4465 */
4466 void pci_clear_master(struct pci_dev *dev)
4467 {
4468 __pci_set_master(dev, false);
4469 }
4470 EXPORT_SYMBOL(pci_clear_master);
4471
4472 /**
4473 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4474 * @dev: the PCI device for which MWI is to be enabled
4475 *
4476 * Helper function for pci_set_mwi.
4477 * Originally copied from drivers/net/acenic.c.
4478 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4479 *
4480 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4481 */
4482 int pci_set_cacheline_size(struct pci_dev *dev)
4483 {
4484 u8 cacheline_size;
4485
4486 if (!pci_cache_line_size)
4487 return -EINVAL;
4488
4489 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4490 equal to or multiple of the right value. */
4491 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4492 if (cacheline_size >= pci_cache_line_size &&
4493 (cacheline_size % pci_cache_line_size) == 0)
4494 return 0;
4495
4496 /* Write the correct value. */
4497 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4498 /* Read it back. */
4499 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4500 if (cacheline_size == pci_cache_line_size)
4501 return 0;
4502
4503 pci_dbg(dev, "cache line size of %d is not supported\n",
4504 pci_cache_line_size << 2);
4505
4506 return -EINVAL;
4507 }
4508 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4509
4510 /**
4511 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4512 * @dev: the PCI device for which MWI is enabled
4513 *
4514 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4515 *
4516 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4517 */
4518 int pci_set_mwi(struct pci_dev *dev)
4519 {
4520 #ifdef PCI_DISABLE_MWI
4521 return 0;
4522 #else
4523 int rc;
4524 u16 cmd;
4525
4526 rc = pci_set_cacheline_size(dev);
4527 if (rc)
4528 return rc;
4529
4530 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4531 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4532 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4533 cmd |= PCI_COMMAND_INVALIDATE;
4534 pci_write_config_word(dev, PCI_COMMAND, cmd);
4535 }
4536 return 0;
4537 #endif
4538 }
4539 EXPORT_SYMBOL(pci_set_mwi);
4540
4541 /**
4542 * pcim_set_mwi - a device-managed pci_set_mwi()
4543 * @dev: the PCI device for which MWI is enabled
4544 *
4545 * Managed pci_set_mwi().
4546 *
4547 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4548 */
4549 int pcim_set_mwi(struct pci_dev *dev)
4550 {
4551 struct pci_devres *dr;
4552
4553 dr = find_pci_dr(dev);
4554 if (!dr)
4555 return -ENOMEM;
4556
4557 dr->mwi = 1;
4558 return pci_set_mwi(dev);
4559 }
4560 EXPORT_SYMBOL(pcim_set_mwi);
4561
4562 /**
4563 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4564 * @dev: the PCI device for which MWI is enabled
4565 *
4566 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4567 * Callers are not required to check the return value.
4568 *
4569 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4570 */
4571 int pci_try_set_mwi(struct pci_dev *dev)
4572 {
4573 #ifdef PCI_DISABLE_MWI
4574 return 0;
4575 #else
4576 return pci_set_mwi(dev);
4577 #endif
4578 }
4579 EXPORT_SYMBOL(pci_try_set_mwi);
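/*
 * Illustrative use (not part of the PCI core): MWI is purely a performance
 * hint, so drivers typically use the _try_ variant and ignore failure.
 */
static void __maybe_unused example_enable_mwi(struct pci_dev *pdev)
{
	/* The device still works, just without Memory-Write-Invalidate. */
	pci_try_set_mwi(pdev);
}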
4580
4581 /**
4582 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4583 * @dev: the PCI device to disable
4584 *
4585 * Disables PCI Memory-Write-Invalidate transaction on the device.
4586 */
4587 void pci_clear_mwi(struct pci_dev *dev)
4588 {
4589 #ifndef PCI_DISABLE_MWI
4590 u16 cmd;
4591
4592 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4593 if (cmd & PCI_COMMAND_INVALIDATE) {
4594 cmd &= ~PCI_COMMAND_INVALIDATE;
4595 pci_write_config_word(dev, PCI_COMMAND, cmd);
4596 }
4597 #endif
4598 }
4599 EXPORT_SYMBOL(pci_clear_mwi);
4600
4601 /**
4602 * pci_disable_parity - disable parity checking for device
4603 * @dev: the PCI device to operate on
4604 *
4605 * Disable parity checking for device @dev.
4606 */
4607 void pci_disable_parity(struct pci_dev *dev)
4608 {
4609 u16 cmd;
4610
4611 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4612 if (cmd & PCI_COMMAND_PARITY) {
4613 cmd &= ~PCI_COMMAND_PARITY;
4614 pci_write_config_word(dev, PCI_COMMAND, cmd);
4615 }
4616 }
4617
4618 /**
4619 * pci_intx - enables/disables PCI INTx for device dev
4620 * @pdev: the PCI device to operate on
4621 * @enable: boolean: whether to enable or disable PCI INTx
4622 *
4623 * Enables/disables PCI INTx for device @pdev.
4624 */
4625 void pci_intx(struct pci_dev *pdev, int enable)
4626 {
4627 u16 pci_command, new;
4628
4629 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4630
4631 if (enable)
4632 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4633 else
4634 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4635
4636 if (new != pci_command) {
4637 struct pci_devres *dr;
4638
4639 pci_write_config_word(pdev, PCI_COMMAND, new);
4640
4641 dr = find_pci_dr(pdev);
4642 if (dr && !dr->restore_intx) {
4643 dr->restore_intx = 1;
4644 dr->orig_intx = !enable;
4645 }
4646 }
4647 }
4648 EXPORT_SYMBOL_GPL(pci_intx);
4649
4650 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4651 {
4652 struct pci_bus *bus = dev->bus;
4653 bool mask_updated = true;
4654 u32 cmd_status_dword;
4655 u16 origcmd, newcmd;
4656 unsigned long flags;
4657 bool irq_pending;
4658
4659 /*
4660 * We do a single dword read to retrieve both command and status.
4661 * Document assumptions that make this possible.
4662 */
4663 BUILD_BUG_ON(PCI_COMMAND % 4);
4664 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4665
4666 raw_spin_lock_irqsave(&pci_lock, flags);
4667
4668 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4669
4670 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4671
4672 /*
4673 * Check interrupt status register to see whether our device
4674 * triggered the interrupt (when masking) or the next IRQ is
4675 * already pending (when unmasking).
4676 */
4677 if (mask != irq_pending) {
4678 mask_updated = false;
4679 goto done;
4680 }
4681
4682 origcmd = cmd_status_dword;
4683 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4684 if (mask)
4685 newcmd |= PCI_COMMAND_INTX_DISABLE;
4686 if (newcmd != origcmd)
4687 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4688
4689 done:
4690 raw_spin_unlock_irqrestore(&pci_lock, flags);
4691
4692 return mask_updated;
4693 }
4694
4695 /**
4696 * pci_check_and_mask_intx - mask INTx on pending interrupt
4697 * @dev: the PCI device to operate on
4698 *
4699 * Check if the device dev has its INTx line asserted, mask it and return
4700 * true in that case.  False is returned if no interrupt was pending.
4701 */
4702 bool pci_check_and_mask_intx(struct pci_dev *dev)
4703 {
4704 return pci_check_and_set_intx_mask(dev, true);
4705 }
4706 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4707
4708 /**
4709 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4710 * @dev: the PCI device to operate on
4711 *
4712 * Check if the device dev has its INTx line asserted, unmask it if not and
4713 * return true.  False is returned and the mask remains active if there was
4714 * still an interrupt pending.
4715 */
4716 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4717 {
4718 return pci_check_and_set_intx_mask(dev, false);
4719 }
4720 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4721
4722 /**
4723 * pci_wait_for_pending_transaction - wait for pending transaction
4724 * @dev: the PCI device to operate on
4725 *
4726 * Return 0 if a transaction is still pending after the timeout, 1 otherwise.
4727 */
4728 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4729 {
4730 if (!pci_is_pcie(dev))
4731 return 1;
4732
4733 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4734 PCI_EXP_DEVSTA_TRPND);
4735 }
4736 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4737
4738 /**
4739 * pcie_flr - initiate a PCIe function level reset
4740 * @dev: device to reset
4741 *
4742 * Initiate a function level reset unconditionally on @dev without
4743 * checking any flags and DEVCAP.
4744 */
4745 int pcie_flr(struct pci_dev *dev)
4746 {
4747 if (!pci_wait_for_pending_transaction(dev))
4748 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4749
4750 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4751
4752 if (dev->imm_ready)
4753 return 0;
4754
4755 /*
4756 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4757 * 100ms, but may silently discard requests while the FLR is in
4758 * progress.  Wait 100ms before trying to access the device.
4759 */
4760 msleep(100);
4761
4762 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4763 }
4764 EXPORT_SYMBOL_GPL(pcie_flr);
4765
4766 /**
4767 * pcie_reset_flr - initiate a PCIe function level reset
4768 * @dev: device to reset
4769 * @probe: if true, return 0 if device can be reset this way
4770 *
4771 * Initiate a function level reset on @dev.
4772 */
4773 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4774 {
4775 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4776 return -ENOTTY;
4777
4778 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4779 return -ENOTTY;
4780
4781 if (probe)
4782 return 0;
4783
4784 return pcie_flr(dev);
4785 }
4786 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4787
4788 static int pci_af_flr(struct pci_dev *dev, bool probe)
4789 {
4790 int pos;
4791 u8 cap;
4792
4793 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4794 if (!pos)
4795 return -ENOTTY;
4796
4797 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4798 return -ENOTTY;
4799
4800 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4801 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4802 return -ENOTTY;
4803
4804 if (probe)
4805 return 0;
4806
4807 /*
4808 * Wait for Transaction Pending bit to clear.  A word-aligned test
4809 * is used, so we use the control offset rather than status and shift
4810 * the test bit to match.
4811 */
4812 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4813 PCI_AF_STATUS_TP << 8))
4814 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4815
4816 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4817
4818 if (dev->imm_ready)
4819 return 0;
4820
4821 /*
4822 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4823 * updated 27 July 2006; a device must complete an FLR within
4824 * 100ms, but may silently discard requests while the FLR is in
4825 * progress.  Wait 100ms before trying to access the device.
4826 */
4827 msleep(100);
4828
4829 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4830 }
4831
4832
4833
4834
4835
4836
4837 /**
4838 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4839 * @dev: Device to reset.
4840 * @probe: if true, return 0 if the device can be reset this way.
4841 *
4842 * If @dev supports native PCI PM and its PCI_PM_CTRL_STATE_MASK field is
4843 * unset, it will be reinitialized internally when going from PCI_D3hot to
4844 * PCI_D0.  If that's the case and the device is not in the D3cold state, it
4845 * will be reset by putting it into the D3hot state and back into D0.
4846 */
4847 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4848 {
4849 u16 csr;
4850
4851 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4852 return -ENOTTY;
4853
4854 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4855 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4856 return -ENOTTY;
4857
4858 if (probe)
4859 return 0;
4860
4861 if (dev->current_state != PCI_D0)
4862 return -EINVAL;
4863
4864 csr &= ~PCI_PM_CTRL_STATE_MASK;
4865 csr |= PCI_D3hot;
4866 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4867 pci_dev_d3_sleep(dev);
4868
4869 csr &= ~PCI_PM_CTRL_STATE_MASK;
4870 csr |= PCI_D0;
4871 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4872 pci_dev_d3_sleep(dev);
4873
4874 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4875 }
4876
4877 /**
4878 * pcie_wait_for_link_delay - Wait until link is active or inactive
4879 * @pdev: Bridge device
4880 * @active: waiting for active or inactive?
4881 * @delay: Delay to wait after link has become active (in ms)
4882 *
4883 * Use this to wait till link becomes active or inactive.
4884 */
4885 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4886 int delay)
4887 {
4888 int timeout = 1000;
4889 bool ret;
4890 u16 lnk_status;
4891
4892 /*
4893 * Some controllers might not implement link active reporting.  In this
4894 * case, we wait for 1000 ms + any delay requested by the caller.
4895 */
4896 if (!pdev->link_active_reporting) {
4897 msleep(timeout + delay);
4898 return true;
4899 }
4900
4901 /*
4902 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4903 * after which we should expect the link to be active if the reset was
4904 * successful.  If so, software must wait a minimum 100ms before sending
4905 * configuration requests to devices downstream of this port.
4906 *
4907 * If the link fails to activate, either the device was physically
4908 * removed or the link is permanently failed.
4909 */
4910 if (active)
4911 msleep(20);
4912 for (;;) {
4913 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4914 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4915 if (ret == active)
4916 break;
4917 if (timeout <= 0)
4918 break;
4919 msleep(10);
4920 timeout -= 10;
4921 }
4922 if (active && ret)
4923 msleep(delay);
4924
4925 return ret == active;
4926 }
4927
4928 /**
4929 * pcie_wait_for_link - Wait until link is active or inactive
4930 * @pdev: Bridge device
4931 * @active: waiting for active or inactive?
4932 *
4933 * Use this to wait till link becomes active or inactive.
4934 */
4935 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4936 {
4937 return pcie_wait_for_link_delay(pdev, active, 100);
4938 }
4939
4940 /*
4941 * Find maximum D3cold delay required by all the devices on the bus.  The
4942 * spec says 100 ms, but firmware can lower it and we allow drivers to
4943 * increase it as well.
4944 *
4945 * Called with @pci_bus_sem locked for reading.
4946 */
4947 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4948 {
4949 const struct pci_dev *pdev;
4950 int min_delay = 100;
4951 int max_delay = 0;
4952
4953 list_for_each_entry(pdev, &bus->devices, bus_list) {
4954 if (pdev->d3cold_delay < min_delay)
4955 min_delay = pdev->d3cold_delay;
4956 if (pdev->d3cold_delay > max_delay)
4957 max_delay = pdev->d3cold_delay;
4958 }
4959
4960 return max(min_delay, max_delay);
4961 }
4962
4963 /**
4964 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4965 * @dev: PCI bridge
4966 *
4967 * Handle necessary delays before access to the devices on the secondary
4968 * side of the bridge are permitted after D3cold to D0 transition.
4969 *
4970 * For PCIe this means the delays in PCIe 5.0 section 6.6.1.  For
4971 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4972 * 4.3.2.
4973 */
4974 void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4975 {
4976 struct pci_dev *child;
4977 int delay;
4978
4979 if (pci_dev_is_disconnected(dev))
4980 return;
4981
4982 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4983 return;
4984
4985 down_read(&pci_bus_sem);
4986
4987 /*
4988 * We only deal with devices that are present currently on the bus.
4989 * For any hot-added devices the access delay is handled in pciehp
4990 * board_added().  In case of ACPI hotplug the firmware is expected
4991 * to configure the devices before OS is notified.
4992 */
4993 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4994 up_read(&pci_bus_sem);
4995 return;
4996 }
4997
4998 /* Take d3cold_delay requirements into account */
4999 delay = pci_bus_max_d3cold_delay(dev->subordinate);
5000 if (!delay) {
5001 up_read(&pci_bus_sem);
5002 return;
5003 }
5004
5005 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
5006 bus_list);
5007 up_read(&pci_bus_sem);
5008
5009 /*
5010 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
5011 * accessing the device after reset (that is 1000 ms + 100 ms).  In
5012 * practice this should not be needed because we don't do power
5013 * management for them (see pci_bridge_d3_possible()).
5014 */
5015 if (!pci_is_pcie(dev)) {
5016 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
5017 msleep(1000 + delay);
5018 return;
5019 }
5020
5021 /*
5022 * For PCIe downstream and root ports that do not support speeds
5023 * greater than 5 GT/s we need to wait a minimum of 100 ms.  For higher
5024 * speeds (gen3) we need to wait first for the data link layer to
5025 * become active.
5026 *
5027 * However, 100 ms is the minimum and the PCIe spec says the
5028 * software must allow at least 1s before it can determine that the
5029 * device that did not respond is a broken device.  There is
5030 * evidence that 100 ms is not always enough, for example certain
5031 * Titan Ridge xHCI controllers do not always respond to
5032 * configuration requests if we only wait for 100 ms (see
5033 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
5034 *
5035 * Therefore we wait for 100 ms and check for the device presence.
5036 * If it is still not present give it an additional 100 ms.
5037 */
5038 if (!pcie_downstream_port(dev))
5039 return;
5040
5041 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
5042 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
5043 msleep(delay);
5044 } else {
5045 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5046 delay);
5047 if (!pcie_wait_for_link_delay(dev, true, delay)) {
5048 /* Did not train, no need to wait any further */
5049 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5050 return;
5051 }
5052 }
5053
5054 if (!pci_device_is_present(child)) {
5055 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
5056 msleep(delay);
5057 }
5058 }
5059
5060 void pci_reset_secondary_bus(struct pci_dev *dev)
5061 {
5062 u16 ctrl;
5063
5064 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5065 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5066 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5067
5068 /*
5069 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
5070 * this to 2ms to ensure that we meet the minimum requirement.
5071 */
5072 msleep(2);
5073
5074 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5075 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5076
5077 /*
5078 * Trhfa for conventional PCI is 2^25 clock cycles.
5079 * Assuming a minimum 33MHz clock this results in a 1s
5080 * delay before we can consider subordinate devices to
5081 * be re-initialized.  PCIe has some ways to shorten this,
5082 * but we don't make use of them yet.
5083 */
5084 ssleep(1);
5085 }
5086
5087 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5088 {
5089 pci_reset_secondary_bus(dev);
5090 }
5091
5092 /**
5093 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5094 * @dev: Bridge device
5095 *
5096 * Use the bridge control register to assert reset on the secondary bus.
5097 * Devices on the secondary bus are left in power-on state.
5098 */
5099 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5100 {
5101 pcibios_reset_secondary_bus(dev);
5102
5103 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
5104 }
5105 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5106
5107 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5108 {
5109 struct pci_dev *pdev;
5110
5111 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5112 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5113 return -ENOTTY;
5114
5115 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5116 if (pdev != dev)
5117 return -ENOTTY;
5118
5119 if (probe)
5120 return 0;
5121
5122 return pci_bridge_secondary_bus_reset(dev->bus->self);
5123 }
5124
5125 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5126 {
5127 int rc = -ENOTTY;
5128
5129 if (!hotplug || !try_module_get(hotplug->owner))
5130 return rc;
5131
5132 if (hotplug->ops->reset_slot)
5133 rc = hotplug->ops->reset_slot(hotplug, probe);
5134
5135 module_put(hotplug->owner);
5136
5137 return rc;
5138 }
5139
5140 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5141 {
5142 if (dev->multifunction || dev->subordinate || !dev->slot ||
5143 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5144 return -ENOTTY;
5145
5146 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5147 }
5148
5149 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5150 {
5151 int rc;
5152
5153 rc = pci_dev_reset_slot_function(dev, probe);
5154 if (rc != -ENOTTY)
5155 return rc;
5156 return pci_parent_bus_reset(dev, probe);
5157 }
5158
5159 void pci_dev_lock(struct pci_dev *dev)
5160 {
5161 /* block PM suspend, driver probe, etc. */
5162 device_lock(&dev->dev);
5163 pci_cfg_access_lock(dev);
5164 }
5165 EXPORT_SYMBOL_GPL(pci_dev_lock);
5166
5167 /* Return 1 on successful lock, 0 on contention */
5168 int pci_dev_trylock(struct pci_dev *dev)
5169 {
5170 if (device_trylock(&dev->dev)) {
5171 if (pci_cfg_access_trylock(dev))
5172 return 1;
5173 device_unlock(&dev->dev);
5174 }
5175
5176 return 0;
5177 }
5178 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5179
5180 void pci_dev_unlock(struct pci_dev *dev)
5181 {
5182 pci_cfg_access_unlock(dev);
5183 device_unlock(&dev->dev);
5184 }
5185 EXPORT_SYMBOL_GPL(pci_dev_unlock);
5186
5187 static void pci_dev_save_and_disable(struct pci_dev *dev)
5188 {
5189 const struct pci_error_handlers *err_handler =
5190 dev->driver ? dev->driver->err_handler : NULL;
5191
5192 /*
5193 * dev->driver->err_handler->reset_prepare() is protected against
5194 * races with ->remove() by the device lock, which must be held by
5195 * the caller.
5196 */
5197 if (err_handler && err_handler->reset_prepare)
5198 err_handler->reset_prepare(dev);
5199
5200 /*
5201 * Wake-up the device prior to save.  PM registers default to D0 after
5202 * reset and a simple register restore doesn't reliably return
5203 * to a non-D0 state anyway.
5204 */
5205 pci_set_power_state(dev, PCI_D0);
5206
5207 pci_save_state(dev);
5208
5209 /*
5210 * Disable the device by clearing the Command register, except for
5211 * INTx-disable which is set.  This not only disables MMIO and I/O
5212 * port BARs, but also prevents the device from being Bus Master,
5213 * stopping DMA from the device including MSI/MSI-X interrupts.
5214 */
5215 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5216 }
5217
5218 static void pci_dev_restore(struct pci_dev *dev)
5219 {
5220 const struct pci_error_handlers *err_handler =
5221 dev->driver ? dev->driver->err_handler : NULL;
5222
5223 pci_restore_state(dev);
5224
5225 /*
5226 * dev->driver->err_handler->reset_done() is protected against
5227 * races with ->remove() by the device lock, which must be held by
5228 * the caller.
5229 */
5230 if (err_handler && err_handler->reset_done)
5231 err_handler->reset_done(dev);
5232 }
5233
5234 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5235 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5236 { },
5237 { pci_dev_specific_reset, .name = "device_specific" },
5238 { pci_dev_acpi_reset, .name = "acpi" },
5239 { pcie_reset_flr, .name = "flr" },
5240 { pci_af_flr, .name = "af_flr" },
5241 { pci_pm_reset, .name = "pm" },
5242 { pci_reset_bus_function, .name = "bus" },
5243 };
5244
5245 static ssize_t reset_method_show(struct device *dev,
5246 struct device_attribute *attr, char *buf)
5247 {
5248 struct pci_dev *pdev = to_pci_dev(dev);
5249 ssize_t len = 0;
5250 int i, m;
5251
5252 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5253 m = pdev->reset_methods[i];
5254 if (!m)
5255 break;
5256
5257 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5258 pci_reset_fn_methods[m].name);
5259 }
5260
5261 if (len)
5262 len += sysfs_emit_at(buf, len, "\n");
5263
5264 return len;
5265 }
5266
5267 static int reset_method_lookup(const char *name)
5268 {
5269 int m;
5270
5271 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5272 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5273 return m;
5274 }
5275
5276 return 0;
5277 }
5278
5279 static ssize_t reset_method_store(struct device *dev,
5280 struct device_attribute *attr,
5281 const char *buf, size_t count)
5282 {
5283 struct pci_dev *pdev = to_pci_dev(dev);
5284 char *options, *name;
5285 int m, n;
5286 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5287
5288 if (sysfs_streq(buf, "")) {
5289 pdev->reset_methods[0] = 0;
5290 pci_warn(pdev, "All device reset methods disabled by user");
5291 return count;
5292 }
5293
5294 if (sysfs_streq(buf, "default")) {
5295 pci_init_reset_methods(pdev);
5296 return count;
5297 }
5298
5299 options = kstrndup(buf, count, GFP_KERNEL);
5300 if (!options)
5301 return -ENOMEM;
5302
5303 n = 0;
5304 while ((name = strsep(&options, " ")) != NULL) {
5305 if (sysfs_streq(name, ""))
5306 continue;
5307
5308 name = strim(name);
5309
5310 m = reset_method_lookup(name);
5311 if (!m) {
5312 pci_err(pdev, "Invalid reset method '%s'", name);
5313 goto error;
5314 }
5315
5316 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5317 pci_err(pdev, "Unsupported reset method '%s'", name);
5318 goto error;
5319 }
5320
5321 if (n == PCI_NUM_RESET_METHODS - 1) {
5322 pci_err(pdev, "Too many reset methods\n");
5323 goto error;
5324 }
5325
5326 reset_methods[n++] = m;
5327 }
5328
5329 reset_methods[n] = 0;
5330
5331 /* Warn if dev-specific supported but not highest priority */
5332 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5333 reset_methods[0] != 1)
5334 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5335 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5336 kfree(options);
5337 return count;
5338
5339 error:
5340 /* Leave previous methods unchanged */
5341 kfree(options);
5342 return -EINVAL;
5343 }
5344 static DEVICE_ATTR_RW(reset_method);
5345
5346 static struct attribute *pci_dev_reset_method_attrs[] = {
5347 &dev_attr_reset_method.attr,
5348 NULL,
5349 };
5350
5351 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5352 struct attribute *a, int n)
5353 {
5354 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5355
5356 if (!pci_reset_supported(pdev))
5357 return 0;
5358
5359 return a->mode;
5360 }
5361
5362 const struct attribute_group pci_dev_reset_method_attr_group = {
5363 .attrs = pci_dev_reset_method_attrs,
5364 .is_visible = pci_dev_reset_method_attr_is_visible,
5365 };
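/*
 * Illustrative use of the sysfs interface defined above (the device address
 * is generic):
 *
 *   # cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *   flr bus
 *   # echo "bus" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *   # echo "default" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 */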
5366
5367 /**
5368 * __pci_reset_function_locked - reset a PCI device function while holding
5369 * the @dev mutex lock.
5370 * @dev: PCI device to reset
5371 *
5372 * Some devices allow an individual function to be reset without affecting
5373 * other functions in the same device.  The PCI device must be responsive
5374 * to PCI config space in order to use this function.
5375 *
5376 * The device function is presumed to be unused and the caller is holding
5377 * the device mutex lock when this function is called.
5378 *
5379 * Resetting the device will make the contents of PCI configuration space
5380 * random, so any caller of this must be prepared to reinitialise the
5381 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5382 * etc.
5383 *
5384 * Returns 0 if the device function was successfully reset or negative if the
5385 * device doesn't support resetting a single function.
5386 */
5387 int __pci_reset_function_locked(struct pci_dev *dev)
5388 {
5389 int i, m, rc;
5390
5391 might_sleep();
5392
5393 /*
5394 * A reset method returns -ENOTTY if it doesn't support this device and
5395 * we should try the next method.
5396 *
5397 * If it returns 0 (success), we're finished.  If it returns any other
5398 * error, we're also finished: this indicates that further reset
5399 * mechanisms might be broken on the device.
5400 */
5401 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5402 m = dev->reset_methods[i];
5403 if (!m)
5404 return -ENOTTY;
5405
5406 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5407 if (!rc)
5408 return 0;
5409 if (rc != -ENOTTY)
5410 return rc;
5411 }
5412
5413 return -ENOTTY;
5414 }
5415 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5416
5417 /**
5418 * pci_init_reset_methods - check whether device can be safely reset
5419 * and store supported reset mechanisms.
5420 * @dev: PCI device to check for reset mechanisms
5421 *
5422 * Some devices allow an individual function to be reset without affecting
5423 * other functions in the same device.  The PCI device must be in D0-D3hot
5424 * state.
5425 *
5426 * Stores reset mechanisms supported by device in reset_methods byte array
5427 * which is a member of struct pci_dev.
5428 */
5429 void pci_init_reset_methods(struct pci_dev *dev)
5430 {
5431 int m, i, rc;
5432
5433 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5434
5435 might_sleep();
5436
5437 i = 0;
5438 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5439 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5440 if (!rc)
5441 dev->reset_methods[i++] = m;
5442 else if (rc != -ENOTTY)
5443 break;
5444 }
5445
5446 dev->reset_methods[i] = 0;
5447 }
5448
5449 /**
5450 * pci_reset_function - quiesce and reset a PCI device function
5451 * @dev: PCI device to reset
5452 *
5453 * Some devices allow an individual function to be reset without affecting
5454 * other functions in the same device.  The PCI device must be responsive
5455 * to PCI config space in order to use this function.
5456 *
5457 * This function does not just reset the PCI portion of a device, but
5458 * clears all the state associated with the device.  This function differs
5459 * from __pci_reset_function_locked() in that it saves and restores device state
5460 * over the reset and takes the PCI device lock.
5461 *
5462 * Returns 0 if the device function was successfully reset or negative if the
5463 * device doesn't support resetting a single function.
5464 */
5465 int pci_reset_function(struct pci_dev *dev)
5466 {
5467 int rc;
5468
5469 if (!pci_reset_supported(dev))
5470 return -ENOTTY;
5471
5472 pci_dev_lock(dev);
5473 pci_dev_save_and_disable(dev);
5474
5475 rc = __pci_reset_function_locked(dev);
5476
5477 pci_dev_restore(dev);
5478 pci_dev_unlock(dev);
5479
5480 return rc;
5481 }
5482 EXPORT_SYMBOL_GPL(pci_reset_function);
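/*
 * Illustrative sketch (not part of the PCI core): recovering a wedged
 * device from its driver, provided the driver is prepared to reprogram it
 * afterwards.  The function name is hypothetical.
 */
static int __maybe_unused example_recover_device(struct pci_dev *pdev)
{
	if (!pci_reset_supported(pdev))
		return -ENOTTY;

	/* Saves config space, resets via the best method, then restores. */
	return pci_reset_function(pdev);
}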
5483
5484 /**
5485 * pci_reset_function_locked - quiesce and reset a PCI device function
5486 * @dev: PCI device to reset
5487 *
5488 * Some devices allow an individual function to be reset without affecting
5489 * other functions in the same device.  The PCI device must be responsive
5490 * to PCI config space in order to use this function.
5491 *
5492 * This function does not just reset the PCI portion of a device, but
5493 * clears all the state associated with the device.  This function differs
5494 * from __pci_reset_function_locked() in that it saves and restores device state
5495 * over the reset.  It also differs from pci_reset_function() in that it
5496 * requires the PCI device lock to be held.
5497 *
5498 * Returns 0 if the device function was successfully reset or negative if the
5499 * device doesn't support resetting a single function.
5500 */
5501 int pci_reset_function_locked(struct pci_dev *dev)
5502 {
5503 int rc;
5504
5505 if (!pci_reset_supported(dev))
5506 return -ENOTTY;
5507
5508 pci_dev_save_and_disable(dev);
5509
5510 rc = __pci_reset_function_locked(dev);
5511
5512 pci_dev_restore(dev);
5513
5514 return rc;
5515 }
5516 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5517
5518 /**
5519 * pci_try_reset_function - quiesce and reset a PCI device function
5520 * @dev: PCI device to reset
5521 *
5522 * Same as above, except return -EAGAIN if unable to lock device.
5523 */
5524 int pci_try_reset_function(struct pci_dev *dev)
5525 {
5526 int rc;
5527
5528 if (!pci_reset_supported(dev))
5529 return -ENOTTY;
5530
5531 if (!pci_dev_trylock(dev))
5532 return -EAGAIN;
5533
5534 pci_dev_save_and_disable(dev);
5535 rc = __pci_reset_function_locked(dev);
5536 pci_dev_restore(dev);
5537 pci_dev_unlock(dev);
5538
5539 return rc;
5540 }
5541 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5542
5543 /* Do any devices on or below this bus prevent a bus reset? */
5544 static bool pci_bus_resetable(struct pci_bus *bus)
5545 {
5546 struct pci_dev *dev;
5547
5548
5549 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5550 return false;
5551
5552 list_for_each_entry(dev, &bus->devices, bus_list) {
5553 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5554 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5555 return false;
5556 }
5557
5558 return true;
5559 }
5560
5561 /* Lock devices from the top of the tree down */
5562 static void pci_bus_lock(struct pci_bus *bus)
5563 {
5564 struct pci_dev *dev;
5565
5566 list_for_each_entry(dev, &bus->devices, bus_list) {
5567 pci_dev_lock(dev);
5568 if (dev->subordinate)
5569 pci_bus_lock(dev->subordinate);
5570 }
5571 }
5572
5573 /* Unlock devices from the bottom of the tree up */
5574 static void pci_bus_unlock(struct pci_bus *bus)
5575 {
5576 struct pci_dev *dev;
5577
5578 list_for_each_entry(dev, &bus->devices, bus_list) {
5579 if (dev->subordinate)
5580 pci_bus_unlock(dev->subordinate);
5581 pci_dev_unlock(dev);
5582 }
5583 }
5584
5585 /* Return 1 on successful lock, 0 on contention */
5586 static int pci_bus_trylock(struct pci_bus *bus)
5587 {
5588 struct pci_dev *dev;
5589
5590 list_for_each_entry(dev, &bus->devices, bus_list) {
5591 if (!pci_dev_trylock(dev))
5592 goto unlock;
5593 if (dev->subordinate) {
5594 if (!pci_bus_trylock(dev->subordinate)) {
5595 pci_dev_unlock(dev);
5596 goto unlock;
5597 }
5598 }
5599 }
5600 return 1;
5601
5602 unlock:
5603 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5604 if (dev->subordinate)
5605 pci_bus_unlock(dev->subordinate);
5606 pci_dev_unlock(dev);
5607 }
5608 return 0;
5609 }
5610
5611 /* Do any devices on or below this slot prevent a bus reset? */
5612 static bool pci_slot_resetable(struct pci_slot *slot)
5613 {
5614 struct pci_dev *dev;
5615
5616 if (slot->bus->self &&
5617 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5618 return false;
5619
5620 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5621 if (!dev->slot || dev->slot != slot)
5622 continue;
5623 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5624 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5625 return false;
5626 }
5627
5628 return true;
5629 }
5630
5631 /* Lock devices from the top of the tree down */
5632 static void pci_slot_lock(struct pci_slot *slot)
5633 {
5634 struct pci_dev *dev;
5635
5636 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5637 if (!dev->slot || dev->slot != slot)
5638 continue;
5639 pci_dev_lock(dev);
5640 if (dev->subordinate)
5641 pci_bus_lock(dev->subordinate);
5642 }
5643 }
5644
5645 /* Unlock devices from the bottom of the tree up */
5646 static void pci_slot_unlock(struct pci_slot *slot)
5647 {
5648 struct pci_dev *dev;
5649
5650 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5651 if (!dev->slot || dev->slot != slot)
5652 continue;
5653 if (dev->subordinate)
5654 pci_bus_unlock(dev->subordinate);
5655 pci_dev_unlock(dev);
5656 }
5657 }
5658
5659 /* Return 1 on successful lock, 0 on contention */
5660 static int pci_slot_trylock(struct pci_slot *slot)
5661 {
5662 struct pci_dev *dev;
5663
5664 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5665 if (!dev->slot || dev->slot != slot)
5666 continue;
5667 if (!pci_dev_trylock(dev))
5668 goto unlock;
5669 if (dev->subordinate) {
5670 if (!pci_bus_trylock(dev->subordinate)) {
5671 pci_dev_unlock(dev);
5672 goto unlock;
5673 }
5674 }
5675 }
5676 return 1;
5677
5678 unlock:
5679 list_for_each_entry_continue_reverse(dev,
5680 &slot->bus->devices, bus_list) {
5681 if (!dev->slot || dev->slot != slot)
5682 continue;
5683 if (dev->subordinate)
5684 pci_bus_unlock(dev->subordinate);
5685 pci_dev_unlock(dev);
5686 }
5687 return 0;
5688 }
5689
5690 /*
5691 * Save and disable devices from the top of the tree down while holding
5692 * the @dev mutex lock for the entire tree.
5693 */
5694 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5695 {
5696 struct pci_dev *dev;
5697
5698 list_for_each_entry(dev, &bus->devices, bus_list) {
5699 pci_dev_save_and_disable(dev);
5700 if (dev->subordinate)
5701 pci_bus_save_and_disable_locked(dev->subordinate);
5702 }
5703 }
5704
5705 /*
5706 * Restore devices from top of the tree down while holding @dev mutex lock
5707 * for the entire tree.  Parent bridges need to be restored before we can
5708 * get to subordinate devices.
5709 */
5710 static void pci_bus_restore_locked(struct pci_bus *bus)
5711 {
5712 struct pci_dev *dev;
5713
5714 list_for_each_entry(dev, &bus->devices, bus_list) {
5715 pci_dev_restore(dev);
5716 if (dev->subordinate)
5717 pci_bus_restore_locked(dev->subordinate);
5718 }
5719 }
5720
5721 /*
5722 * Save and disable devices from the top of the tree down while holding
5723 * the @dev mutex lock for the entire tree.
5724 */
5725 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5726 {
5727 struct pci_dev *dev;
5728
5729 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5730 if (!dev->slot || dev->slot != slot)
5731 continue;
5732 pci_dev_save_and_disable(dev);
5733 if (dev->subordinate)
5734 pci_bus_save_and_disable_locked(dev->subordinate);
5735 }
5736 }
5737
5738 /*
5739 * Restore devices from top of the tree down while holding @dev mutex lock
5740 * for the entire tree.  Parent bridges need to be restored before we can
5741 * get to subordinate devices.
5742 */
5743 static void pci_slot_restore_locked(struct pci_slot *slot)
5744 {
5745 struct pci_dev *dev;
5746
5747 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5748 if (!dev->slot || dev->slot != slot)
5749 continue;
5750 pci_dev_restore(dev);
5751 if (dev->subordinate)
5752 pci_bus_restore_locked(dev->subordinate);
5753 }
5754 }
5755
5756 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5757 {
5758 int rc;
5759
5760 if (!slot || !pci_slot_resetable(slot))
5761 return -ENOTTY;
5762
5763 if (!probe)
5764 pci_slot_lock(slot);
5765
5766 might_sleep();
5767
5768 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5769
5770 if (!probe)
5771 pci_slot_unlock(slot);
5772
5773 return rc;
5774 }
5775
5776 /**
5777 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5778 * @slot: PCI slot to probe
5779 *
5780 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5781 */
5782 int pci_probe_reset_slot(struct pci_slot *slot)
5783 {
5784 return pci_slot_reset(slot, PCI_RESET_PROBE);
5785 }
5786 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5787
5788 /**
5789 * __pci_reset_slot - Try to reset a PCI slot
5790 * @slot: PCI slot to reset
5791 *
5792 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5793 * independent of other slots.  For instance, some slots may support slot power
5794 * control.  In the case of a 1:1 bus to slot architecture, this function may
5795 * wrap the bus reset to avoid spurious slot related events such as hotplug.
5796 * Generally a slot reset should be attempted before a bus reset.  All of the
5797 * function of the slot and any subordinate buses behind the slot are reset
5798 * through this function.  PCI config space of all devices in the slot and
5799 * behind the slot is saved before and restored after reset.
5800 *
5801 * Same as above except return -EAGAIN if the slot cannot be locked.
5802 */
static int __pci_reset_slot(struct pci_slot *slot)
{
	int rc;

	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
	if (rc)
		return rc;

	if (pci_slot_trylock(slot)) {
		pci_slot_save_and_disable_locked(slot);
		might_sleep();
		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
		pci_slot_restore_locked(slot);
		pci_slot_unlock(slot);
	} else
		rc = -EAGAIN;

	return rc;
}

static int pci_bus_reset(struct pci_bus *bus, bool probe)
{
	int ret;

	if (!bus->self || !pci_bus_resetable(bus))
		return -ENOTTY;

	if (probe)
		return 0;

	pci_bus_lock(bus);

	might_sleep();

	ret = pci_bridge_secondary_bus_reset(bus->self);

	pci_bus_unlock(bus);

	return ret;
}

/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the method is
 * available.  If slot reset fails or is not available, this will fall back to
 * a secondary bus reset.
 */
int pci_bus_error_reset(struct pci_dev *bridge)
{
	struct pci_bus *bus = bridge->subordinate;
	struct pci_slot *slot;

	if (!bus)
		return -ENOTTY;

	mutex_lock(&pci_slot_mutex);
	if (list_empty(&bus->slots))
		goto bus_reset;

	list_for_each_entry(slot, &bus->slots, list)
		if (pci_probe_reset_slot(slot))
			goto bus_reset;

	list_for_each_entry(slot, &bus->slots, list)
		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
			goto bus_reset;

	mutex_unlock(&pci_slot_mutex);
	return 0;
bus_reset:
	mutex_unlock(&pci_slot_mutex);
	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
}

/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
int pci_probe_reset_bus(struct pci_bus *bus)
{
	return pci_bus_reset(bus, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);

/**
 * __pci_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
static int __pci_reset_bus(struct pci_bus *bus)
{
	int rc;

	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
	if (rc)
		return rc;

	if (pci_bus_trylock(bus)) {
		pci_bus_save_and_disable_locked(bus);
		might_sleep();
		rc = pci_bridge_secondary_bus_reset(bus->self);
		pci_bus_restore_locked(bus);
		pci_bus_unlock(bus);
	} else
		rc = -EAGAIN;

	return rc;
}

/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
int pci_reset_bus(struct pci_dev *pdev)
{
	return (!pci_probe_reset_slot(pdev->slot)) ?
	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
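
/*
 * Illustrative sketch (not part of the original file): with no
 * function-level reset available, a driver might fall back to a
 * slot/bus reset.  "foo_reset" is a hypothetical helper.
 *
 *	static int foo_reset(struct pci_dev *pdev)
 *	{
 *		// Resets every function on the slot or bus, so call it
 *		// only when all affected devices are quiesced.
 *		return pci_reset_bus(pdev);
 *	}
 */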

/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes or
 * appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes or appropriate error
 * value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
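
/*
 * Illustrative sketch (an assumption, not kernel code): clamping a
 * requested PCI-X read byte count to what the device was designed for.
 *
 *	int max = pcix_get_max_mmrbc(pdev);	// e.g., 2048
 *
 *	if (max > 0)
 *		pcix_set_mmrbc(pdev, min(max, 1024));
 */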

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;
	int ret;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * to the max packet size to keep the host bridge from generating
	 * requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_READRQ, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);
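
/*
 * Illustrative sketch (an assumption, not kernel code): a driver limiting
 * its Max_Read_Request_Size during probe.  Note the encoding above: for
 * rq = 512, ffs(512) = 10, so v = (10 - 8) << 12, i.e., field value 2.
 *
 *	int ret = pcie_set_readrq(pdev, 512);
 *
 *	if (ret)
 *		pci_warn(pdev, "failed to set MRRS: %d\n", ret);
 */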

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;
	int ret;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_PAYLOAD, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);
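
/*
 * Illustrative sketch (an assumption, not kernel code): the encoding
 * mirrors pcie_set_readrq().  mps = 256 gives v = ffs(256) - 8 = 1, which
 * must not exceed dev->pcie_mpss (the capability ceiling) before being
 * shifted into PCI_EXP_DEVCTL_PAYLOAD.
 *
 *	if (pcie_set_mps(pdev, 256))
 *		pci_warn(pdev, "failed to set 256-byte MPS\n");
 */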

/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
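
/*
 * Illustrative sketch (an assumption, not kernel code): finding the
 * bottleneck link above an endpoint.
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 * "bw" is raw bandwidth in Mb/s; "limit" points at the device whose
 * upstream link constrains the path.
 */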

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * should use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);

	/* PCIe r3.0-compliant */
	if (lnkcap2)
		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		return PCIE_SPEED_5_0GT;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		return PCIE_SPEED_2_5GT;

	return PCI_SPEED_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_speed_cap);

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	return PCIE_LNK_WIDTH_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
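
/*
 * Illustrative sketch (an assumption, not kernel code): high-bandwidth
 * drivers such as NICs typically call this once from their probe routine
 * so users can spot a card plugged into a slower slot.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int ret = foo_hw_init(pdev);	// hypothetical setup
 *
 *		if (!ret)
 *			pcie_print_link_status(pdev);
 *		return ret;
 *	}
 */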

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
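
/*
 * Illustrative sketch (an assumption, not kernel code): requesting only
 * the memory BARs of a device; "foo" is a hypothetical driver name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "foo"))
 *		return -EBUSY;
 */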

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
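
/*
 * Illustrative sketch (an assumption, not kernel code): quirks call
 * pci_add_dma_alias() from a header fixup so the alias exists before
 * IOMMU group creation, e.g. for a device that issues DMA tagged as
 * function 0.  The vendor/device IDs here are placeholders.
 *
 *	static void quirk_foo_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_foo_dma_alias);
 */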

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
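
/*
 * Illustrative sketch (an assumption, not kernel code): hotplug-aware
 * drivers can use this to distinguish a surprise-removed device (config
 * reads returning all 1's) from one that is merely misbehaving.
 *
 *	if (!pci_device_is_present(pdev))
 *		return -ENODEV;	// device is gone; skip cleanup I/O
 */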

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
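
/*
 * Illustrative examples of the syntax parsed above (an optional alignment
 * order, '@', then a device specifier accepted by pci_dev_str_match();
 * entries separated by ';' or ','); the addresses/IDs are placeholders:
 *
 *	pci=resource_alignment=20@0000:01:00.0	// align BARs to 1 MiB (2^20)
 *	pci=resource_alignment=pci:8086:9c35	// default: page alignment
 */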

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * resources and we use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There is no way to influence their alignment.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	kfree(old);

	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the use_dt_domains value.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
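
/*
 * Illustrative example (not from the original file): several of the
 * options parsed above combined on one kernel command line:
 *
 *	pci=nomsi,noaer,hpmemsize=128M,pcie_bus_safe,resource_alignment=pci:8086:9c35
 */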

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete.  We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call.  So we allocate memory and
 * copy the variable here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);