// SPDX-License-Identifier: GPL-2.0
/*
 * mmconfig-shared.c - Low-level direct PCI config space access via
 *                     MMCONFIG - common code between i386 and x86-64.
 *
 * This code does:
 * - known chipset handling
 * - ACPI decoding and validation
 *
 * Per-architecture code takes care of the mappings and accesses
 * themselves.
 */

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>

#define PREFIX "PCI: "

/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
static bool pci_mmcfg_arch_init_failed;
static DEFINE_MUTEX(pci_mmcfg_lock);
#define pci_mmcfg_lock_held() lock_is_held(&(pci_mmcfg_lock).dep_map)

LIST_HEAD(pci_mmcfg_list);

static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
    if (cfg->res.parent)
        release_resource(&cfg->res);
    list_del(&cfg->list);
    kfree(cfg);
}

static void __init free_all_mmcfg(void)
{
    struct pci_mmcfg_region *cfg, *tmp;

    pci_mmcfg_arch_free();
    list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
        pci_mmconfig_remove(cfg);
}

static void list_add_sorted(struct pci_mmcfg_region *new)
{
    struct pci_mmcfg_region *cfg;

    /* keep list sorted by segment and starting bus number */
    list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held()) {
        if (cfg->segment > new->segment ||
            (cfg->segment == new->segment &&
             cfg->start_bus >= new->start_bus)) {
            list_add_tail_rcu(&new->list, &cfg->list);
            return;
        }
    }
    list_add_tail_rcu(&new->list, &pci_mmcfg_list);
}

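/*
 * Allocate and initialize one MMCONFIG (ECAM) region descriptor. In the
 * PCIe ECAM layout a function's config space lives at
 *   address + (bus << 20) + (devfn << 12) + offset,
 * so each bus consumes 1 MiB of the window; PCI_MMCFG_BUS_OFFSET() below
 * converts a bus number into that byte offset.
 */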
static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
                                                   int end, u64 addr)
{
    struct pci_mmcfg_region *new;
    struct resource *res;

    if (addr == 0)
        return NULL;

    new = kzalloc(sizeof(*new), GFP_KERNEL);
    if (!new)
        return NULL;

    new->address = addr;
    new->segment = segment;
    new->start_bus = start;
    new->end_bus = end;

    res = &new->res;
    res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
    res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
    res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
             "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
    res->name = new->name;

    return new;
}

struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
                                                 int end, u64 addr)
{
    struct pci_mmcfg_region *new;

    new = pci_mmconfig_alloc(segment, start, end, addr);
    if (new) {
        mutex_lock(&pci_mmcfg_lock);
        list_add_sorted(new);
        mutex_unlock(&pci_mmcfg_lock);

        pr_info(PREFIX
                "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
                "(base %#lx)\n",
                segment, start, end, &new->res, (unsigned long)addr);
    }

    return new;
}

struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
    struct pci_mmcfg_region *cfg;

    list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held())
        if (cfg->segment == segment &&
            cfg->start_bus <= bus && bus <= cfg->end_bus)
            return cfg;

    return NULL;
}

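/*
 * Intel E7520 MCH quirk: the host bridge at 00:00.0 carries a 16-bit
 * MMCONFIG window register at config offset 0xce. Bits [15:12] supply
 * address bits [31:28] of a 256 MB-aligned window covering buses 0-255;
 * the values 0x0 and 0xf are treated as "not configured".
 */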
static const char *__init pci_mmcfg_e7520(void)
{
    u32 win;
    raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);

    win = win & 0xf000;
    if (win == 0x0000 || win == 0xf000)
        return NULL;

    if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
        return NULL;

    return "Intel Corporation E7520 Memory Controller Hub";
}

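/*
 * Intel 945 quirk: read the PCI Express configuration base register
 * (PCIEXBAR, config offset 0x48 of the host bridge). Bit 0 enables the
 * window, bits [2:1] select its size (256/128/64 MB) and the remaining
 * high bits hold the base. Misaligned bases and bases at or above
 * 0xf0000000 (where the APIC and chipset registers live) are rejected.
 */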
static const char *__init pci_mmcfg_intel_945(void)
{
    u32 pciexbar, mask = 0, len = 0;

    raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);

    /* Enable bit */
    if (!(pciexbar & 1))
        return NULL;

    /* Size bits */
    switch ((pciexbar >> 1) & 3) {
    case 0:
        mask = 0xf0000000U;
        len  = 0x10000000U;
        break;
    case 1:
        mask = 0xf8000000U;
        len  = 0x08000000U;
        break;
    case 2:
        mask = 0xfc000000U;
        len  = 0x04000000U;
        break;
    default:
        return NULL;
    }

    /*
     * Errata #2: things break when the window is not aligned on a
     * 256 MB boundary. This can only happen in the 64 MB/128 MB modes.
     */
    if ((pciexbar & mask) & 0x0fffffffU)
        return NULL;

    /* Don't hit the APIC registers and their friends */
    if ((pciexbar & mask) >= 0xf0000000U)
        return NULL;

    if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
        return NULL;

    return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}

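/*
 * AMD Family 10h quirk: the MMCONFIG window is described by the
 * MMIO_CONF_BASE MSR. The BUSRANGE field is the log2 of the number of
 * buses covered; anything beyond 8 bits selects additional PCI segments,
 * each of which gets its own 256 MB window (256 buses * 1 MiB), hence the
 * (1 << 28) stride per segment below.
 */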
static const char *__init pci_mmcfg_amd_fam10h(void)
{
    u32 low, high, address;
    u64 base, msr;
    int i;
    unsigned segnbits = 0, busnbits, end_bus;

    if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
        return NULL;

    address = MSR_FAM10H_MMIO_CONF_BASE;
    if (rdmsr_safe(address, &low, &high))
        return NULL;

    msr = high;
    msr <<= 32;
    msr |= low;

    /* MMCONFIG is not enabled */
    if (!(msr & FAM10H_MMIO_CONF_ENABLE))
        return NULL;

    base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

    busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
             FAM10H_MMIO_CONF_BUSRANGE_MASK;

    /* A bus range of zero would cover only bus 0; skip it. */
    if (!busnbits)
        return NULL;

    if (busnbits > 8) {
        segnbits = busnbits - 8;
        busnbits = 8;
    }

    end_bus = (1 << busnbits) - 1;
    for (i = 0; i < (1 << segnbits); i++)
        if (pci_mmconfig_add(i, 0, end_bus,
                     base + (1<<28) * i) == NULL) {
            free_all_mmcfg();
            return NULL;
        }

    return "AMD Family 10h NB";
}

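/*
 * nVidia MCP55 quirk: scan every bus for an MCP55 host bridge (10de:0369)
 * and decode its extended config register at offset 0x90: bit 31 is the
 * enable, bits [23:16] the starting bus, bits [29:28] a size code
 * selecting 256/128/64/32 buses, and the low bits (masked according to
 * the size code, then shifted left by 25) the window base. Each bridge
 * found contributes one region.
 */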
static bool __initdata mcp55_checked;
static const char *__init pci_mmcfg_nvidia_mcp55(void)
{
    int bus;
    int mcp55_mmconf_found = 0;

    static const u32 extcfg_regnum __initconst  = 0x90;
    static const u32 extcfg_regsize __initconst = 4;
    static const u32 extcfg_enable_mask __initconst = 1 << 31;
    static const u32 extcfg_start_mask __initconst  = 0xff << 16;
    static const int extcfg_start_shift __initconst = 16;
    static const u32 extcfg_size_mask __initconst   = 0x3 << 28;
    static const int extcfg_size_shift __initconst  = 28;
    static const int extcfg_sizebus[] __initconst   = {
        0x100, 0x80, 0x40, 0x20
    };
    static const u32 extcfg_base_mask[] __initconst = {
        0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
    };
    static const int extcfg_base_lshift __initconst = 25;

    /*
     * Skip the scan if ACPI is enabled, if another quirk (e.g. AMD
     * Family 10h) has already registered regions, or if we have
     * already checked.
     */
    if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
        return NULL;

    mcp55_checked = true;
    for (bus = 0; bus < 256; bus++) {
        u64 base;
        u32 l, extcfg;
        u16 vendor, device;
        int start, size_index, end;

        raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
        vendor = l & 0xffff;
        device = (l >> 16) & 0xffff;

        if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
            continue;

        raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
                  extcfg_regsize, &extcfg);

        if (!(extcfg & extcfg_enable_mask))
            continue;

        size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
        base = extcfg & extcfg_base_mask[size_index];
        /* the base can be above 4 GB */
        base <<= extcfg_base_lshift;
        start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
        end = start + extcfg_sizebus[size_index] - 1;
        if (pci_mmconfig_add(0, start, end, base) == NULL)
            continue;
        mcp55_mmconf_found++;
    }

    if (!mcp55_mmconf_found)
        return NULL;

    return "nVidia MCP55";
}

struct pci_mmcfg_hostbridge_probe {
    u32 bus;
    u32 devfn;
    u32 vendor;
    u32 device;
    const char *(*probe)(void);
};

static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
    { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
      PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
    { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
      PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
    { 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
      0x1200, pci_mmcfg_amd_fam10h },
    { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
      0x1200, pci_mmcfg_amd_fam10h },
    { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
      0x0369, pci_mmcfg_nvidia_mcp55 },
};

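/*
 * The region list is kept sorted by segment and start bus, so a region
 * whose end bus runs into the next region's range can simply be clipped
 * to end just before it; an end bus below the start bus is reset to 255
 * first.
 */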
static void __init pci_mmcfg_check_end_bus_number(void)
{
    struct pci_mmcfg_region *cfg, *cfgx;

    /* Fixup overlaps */
    list_for_each_entry(cfg, &pci_mmcfg_list, list) {
        if (cfg->end_bus < cfg->start_bus)
            cfg->end_bus = 255;

        /* Don't dereference the list head! */
        if (cfg->list.next == &pci_mmcfg_list)
            break;

        cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
        if (cfg->end_bus >= cfgx->start_bus)
            cfg->end_bus = cfgx->start_bus - 1;
    }
}

static int __init pci_mmcfg_check_hostbridge(void)
{
    u32 l;
    u32 bus, devfn;
    u16 vendor, device;
    int i;
    const char *name;

    if (!raw_pci_ops)
        return 0;

    free_all_mmcfg();

    for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
        bus = pci_mmcfg_probes[i].bus;
        devfn = pci_mmcfg_probes[i].devfn;
        raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
        vendor = l & 0xffff;
        device = (l >> 16) & 0xffff;

        name = NULL;
        if (pci_mmcfg_probes[i].vendor == vendor &&
            pci_mmcfg_probes[i].device == device)
            name = pci_mmcfg_probes[i].probe();

        if (name)
            pr_info(PREFIX "%s with MMCONFIG support\n", name);
    }

    /* Some probed end bus numbers are bogus or overlapping; fix them up. */
    pci_mmcfg_check_end_bus_number();

    return !list_empty(&pci_mmcfg_list);
}

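/*
 * acpi_walk_resources() callback: set mcfg_res->flags when the MMCONFIG
 * range is fully contained in the FixedMemory32 or Address32/64 memory
 * resource currently being examined.
 */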
static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
{
    struct resource *mcfg_res = data;
    struct acpi_resource_address64 address;
    acpi_status status;

    if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
        struct acpi_resource_fixed_memory32 *fixmem32 =
            &res->data.fixed_memory32;
        if (!fixmem32)
            return AE_OK;
        if ((mcfg_res->start >= fixmem32->address) &&
            (mcfg_res->end < (fixmem32->address +
                      fixmem32->address_length))) {
            mcfg_res->flags = 1;
            return AE_CTRL_TERMINATE;
        }
    }
    if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
        (res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
        return AE_OK;

    status = acpi_resource_to_address64(res, &address);
    if (ACPI_FAILURE(status) ||
       (address.address.address_length <= 0) ||
       (address.resource_type != ACPI_MEMORY_RANGE))
        return AE_OK;

    if ((mcfg_res->start >= address.address.minimum) &&
        (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
        mcfg_res->flags = 1;
        return AE_CTRL_TERMINATE;
    }
    return AE_OK;
}

static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
                    void *context, void **rv)
{
    struct resource *mcfg_res = context;

    acpi_walk_resources(handle, METHOD_NAME__CRS,
                check_mcfg_resource, context);

    if (mcfg_res->flags)
        return AE_CTRL_TERMINATE;

    return AE_OK;
}

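/*
 * Check whether [start, end) is covered by the _CRS of a PNP0C01 (system
 * board) or PNP0C02 (motherboard resources) device. The e820_type
 * argument is unused; it only exists so the function matches
 * check_reserved_t.
 */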
static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
{
    struct resource mcfg_res;

    mcfg_res.start = start;
    mcfg_res.end = end - 1;
    mcfg_res.flags = 0;

    acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);

    if (!mcfg_res.flags)
        acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
                 NULL);

    return mcfg_res.flags;
}

typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);

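/*
 * Test whether an MMCONFIG region is reserved, either in the ACPI
 * motherboard resources or in E820. If only a leading part of the region
 * is reserved, keep halving the size until a reserved prefix is found,
 * but never accept less than 16 MB (16 buses); when the region shrinks,
 * end_bus and the resource are trimmed to match.
 */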
static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                                     struct pci_mmcfg_region *cfg,
                                     struct device *dev, int with_e820)
{
    u64 addr = cfg->res.start;
    u64 size = resource_size(&cfg->res);
    u64 old_size = size;
    int num_buses;
    char *method = with_e820 ? "E820" : "ACPI motherboard resources";

    while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
        size >>= 1;
        if (size < (16UL<<20))
            break;
    }

    if (size < (16UL<<20) && size != old_size)
        return false;

    if (dev)
        dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
             &cfg->res, method);
    else
        pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
               &cfg->res, method);

    if (old_size != size) {
        /* update end_bus */
        cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
        num_buses = cfg->end_bus - cfg->start_bus + 1;
        cfg->res.end = cfg->res.start +
            PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
        snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
             "PCI MMCONFIG %04x [bus %02x-%02x]",
             cfg->segment, cfg->start_bus, cfg->end_bus);

        if (dev)
            dev_info(dev,
                "MMCONFIG "
                "at %pR (base %#lx) (size reduced!)\n",
                &cfg->res, (unsigned long) cfg->address);
        else
            pr_info(PREFIX
                "MMCONFIG for %04x [bus %02x-%02x] "
                "at %pR (base %#lx) (size reduced!)\n",
                cfg->segment, cfg->start_bus, cfg->end_bus,
                &cfg->res, (unsigned long) cfg->address);
    }

    return true;
}

static bool __ref
pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
{
    if (!early && !acpi_disabled) {
        if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
            return true;

        if (dev)
            dev_info(dev, FW_INFO
                 "MMCONFIG at %pR not reserved in "
                 "ACPI motherboard resources\n",
                 &cfg->res);
        else
            pr_info(FW_INFO PREFIX
                   "MMCONFIG at %pR not reserved in "
                   "ACPI motherboard resources\n",
                   &cfg->res);
    }

    /*
     * e820__mapped_all() is marked as __init.
     * All entries from ACPI MCFG table have been checked at boot time.
     * For MCFG information constructed from hotpluggable host bridge's
     * _CBA method, just assume it's reserved.
     */
    if (pci_mmcfg_running_state)
        return true;

    /*
     * Don't try this check unless configuration type 1 accesses are
     * available. (What about type 2?)
     */
    if (raw_pci_ops)
        return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);

    return false;
}

static void __init pci_mmcfg_reject_broken(int early)
{
    struct pci_mmcfg_region *cfg;

    list_for_each_entry(cfg, &pci_mmcfg_list, list) {
        if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
            pr_info(PREFIX "not using MMCONFIG\n");
            free_all_mmcfg();
            return;
        }
    }
}

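/*
 * Sanity-check one MCFG allocation entry: a base address above 4 GB is
 * only accepted from SGI firmware or from firmware that looks recent
 * enough to be trusted (MCFG revision >= 1 and a BIOS dated 2010 or
 * later); everything else is rejected.
 */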
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
                    struct acpi_mcfg_allocation *cfg)
{
    if (cfg->address < 0xFFFFFFFF)
        return 0;

    if (!strncmp(mcfg->header.oem_id, "SGI", 3))
        return 0;

    if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010))
        return 0;

    pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
           "is above 4GB, ignored\n", cfg->pci_segment,
           cfg->start_bus_number, cfg->end_bus_number, cfg->address);
    return -EINVAL;
}

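/*
 * Parse the ACPI MCFG table. The allocation entries follow the fixed
 * header, so the entry count is derived from the table length; each valid
 * entry becomes one MMCONFIG region, and a single bad entry discards the
 * whole table.
 */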
static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
    struct acpi_table_mcfg *mcfg;
    struct acpi_mcfg_allocation *cfg_table, *cfg;
    unsigned long i;
    int entries;

    if (!header)
        return -EINVAL;

    mcfg = (struct acpi_table_mcfg *)header;

    /* How many allocation entries does the table contain? */
    free_all_mmcfg();
    entries = 0;
    i = header->length - sizeof(struct acpi_table_mcfg);
    while (i >= sizeof(struct acpi_mcfg_allocation)) {
        entries++;
        i -= sizeof(struct acpi_mcfg_allocation);
    }
    if (entries == 0) {
        pr_err(PREFIX "MMCONFIG has no entries\n");
        return -ENODEV;
    }

    cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
    for (i = 0; i < entries; i++) {
        cfg = &cfg_table[i];
        if (acpi_mcfg_check_entry(mcfg, cfg)) {
            free_all_mmcfg();
            return -ENODEV;
        }

        if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
                   cfg->end_bus_number, cfg->address) == NULL) {
            pr_warn(PREFIX "no memory for MCFG entries\n");
            free_all_mmcfg();
            return -ENOMEM;
        }
    }

    return 0;
}

#ifdef CONFIG_ACPI_APEI
extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
                     void *data), void *data);

static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
                     void *data), void *data)
{
    struct pci_mmcfg_region *cfg;
    int rc;

    if (list_empty(&pci_mmcfg_list))
        return 0;

    list_for_each_entry(cfg, &pci_mmcfg_list, list) {
        rc = func(cfg->res.start, resource_size(&cfg->res), data);
        if (rc)
            return rc;
    }

    return 0;
}
#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
#else
#define set_apei_filter()
#endif

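/*
 * Common init path for the early and late passes: drop regions that are
 * not properly reserved, derive pcibios_last_bus from the segment-0
 * regions if it is still unknown, then let the architecture code map
 * whatever is left.
 */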
static void __init __pci_mmcfg_init(int early)
{
    pci_mmcfg_reject_broken(early);
    if (list_empty(&pci_mmcfg_list))
        return;

    if (pcibios_last_bus < 0) {
        const struct pci_mmcfg_region *cfg;

        list_for_each_entry(cfg, &pci_mmcfg_list, list) {
            if (cfg->segment)
                break;
            pcibios_last_bus = cfg->end_bus;
        }
    }

    if (pci_mmcfg_arch_init())
        pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
    else {
        free_all_mmcfg();
        pci_mmcfg_arch_init_failed = true;
    }
}

static int __initdata known_bridge;

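/*
 * MMCONFIG is set up in two passes. In the early pass regions come from a
 * known chipset quirk or from the static MCFG table and can only be
 * validated against E820, since the ACPI interpreter is not available
 * yet. The late pass re-parses MCFG once ACPI is up, so the regions can
 * also be checked against the ACPI motherboard resources.
 */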
void __init pci_mmcfg_early_init(void)
{
    if (pci_probe & PCI_PROBE_MMCONF) {
        if (pci_mmcfg_check_hostbridge())
            known_bridge = 1;
        else
            acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
        __pci_mmcfg_init(1);

        set_apei_filter();
    }
}

void __init pci_mmcfg_late_init(void)
{
    /* MMCONFIG disabled */
    if ((pci_probe & PCI_PROBE_MMCONF) == 0)
        return;

    if (known_bridge)
        return;

    /* MMCONFIG hasn't been enabled yet, try again */
    if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
        acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
        __pci_mmcfg_init(0);
    }
}

static int __init pci_mmcfg_late_insert_resources(void)
{
    struct pci_mmcfg_region *cfg;

    pci_mmcfg_running_state = true;

    /* If we are not using MMCONFIG, don't insert the resources. */
    if ((pci_probe & PCI_PROBE_MMCONF) == 0)
        return 1;

    /*
     * Attempt to insert the mmcfg resources, but not with the busy flag
     * marked, so that they won't cause request errors when
     * __request_region is called.
     */
    list_for_each_entry(cfg, &pci_mmcfg_list, list)
        if (!cfg->res.parent)
            insert_resource(&iomem_resource, &cfg->res);

    return 0;
}

/*
 * Perform MMCONFIG resource insertion after PCI initialization to allow for
 * misprogrammed MCFG tables that state larger sizes but actually conflict
 * with other system resources.
 */
late_initcall(pci_mmcfg_late_insert_resources);

/* Add MMCONFIG information for a host bridge (e.g. one hot-added via ACPI _CBA) */
int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
            phys_addr_t addr)
{
    int rc;
    struct resource *tmp = NULL;
    struct pci_mmcfg_region *cfg;

    if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
        return -ENODEV;

    if (start > end)
        return -EINVAL;

    mutex_lock(&pci_mmcfg_lock);
    cfg = pci_mmconfig_lookup(seg, start);
    if (cfg) {
        if (cfg->end_bus < end)
            dev_info(dev, FW_INFO
                 "MMCONFIG for "
                 "domain %04x [bus %02x-%02x] "
                 "only partially covers this bridge\n",
                  cfg->segment, cfg->start_bus, cfg->end_bus);
        mutex_unlock(&pci_mmcfg_lock);
        return -EEXIST;
    }

    if (!addr) {
        mutex_unlock(&pci_mmcfg_lock);
        return -EINVAL;
    }

    rc = -EBUSY;
    cfg = pci_mmconfig_alloc(seg, start, end, addr);
    if (cfg == NULL) {
        dev_warn(dev, "failed to add MMCONFIG (out of memory)\n");
        rc = -ENOMEM;
    } else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
        dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
             &cfg->res);
    } else {
        /* Insert the resource unless we are still in the boot stage */
        if (pci_mmcfg_running_state)
            tmp = insert_resource_conflict(&iomem_resource,
                               &cfg->res);

        if (tmp) {
            dev_warn(dev,
                 "MMCONFIG %pR conflicts with "
                 "%s %pR\n",
                 &cfg->res, tmp->name, tmp);
        } else if (pci_mmcfg_arch_map(cfg)) {
            dev_warn(dev, "failed to map MMCONFIG %pR\n",
                 &cfg->res);
        } else {
            list_add_sorted(cfg);
            dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
                 &cfg->res, (unsigned long)addr);
            cfg = NULL;
            rc = 0;
        }
    }

    if (cfg) {
        if (cfg->res.parent)
            release_resource(&cfg->res);
        kfree(cfg);
    }

    mutex_unlock(&pci_mmcfg_lock);

    return rc;
}

/* Delete MMCFG information for host bridges */
int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
{
    struct pci_mmcfg_region *cfg;

    mutex_lock(&pci_mmcfg_lock);
    list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
        if (cfg->segment == seg && cfg->start_bus == start &&
            cfg->end_bus == end) {
            list_del_rcu(&cfg->list);
            synchronize_rcu();
            pci_mmcfg_arch_unmap(cfg);
            if (cfg->res.parent)
                release_resource(&cfg->res);
            mutex_unlock(&pci_mmcfg_lock);
            kfree(cfg);
            return 0;
        }
    mutex_unlock(&pci_mmcfg_lock);

    return -ENOENT;
}