/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

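/*
 * Check whether the platform exposes the dedicated discovery table PCI
 * device, i.e. an Intel device carrying the uncore discovery extended
 * capability.
 */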
static bool has_generic_discovery_table(void)
{
    struct pci_dev *dev;
    int dvsec;

    dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
    if (!dev)
        return false;

    /* A discovery table device has the unique capability ID. */
    dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
    pci_dev_put(dev);
    if (dvsec)
        return true;

    return false;
}

static int logical_die_id;

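/*
 * Map a discovery table device to a logical die ID via the NUMA node of
 * its bus. When no NUMA info is available, fall back to handing out
 * logical_die_id values in detection order.
 */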
static int get_device_die_id(struct pci_dev *dev)
{
    int cpu, node = pcibus_to_node(dev->bus);

    /*
     * If the NUMA info is not available, assume that logical die IDs are
     * assigned in the order in which the discovery table devices are
     * detected.
     */
    if (node < 0)
        return logical_die_id++;

    for_each_cpu(cpu, cpumask_of_node(node)) {
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        if (c->initialized && cpu_to_node(cpu) == node)
            return c->logical_die_id;
    }

    /*
     * All CPUs of a node may be offlined. In that case, the PCI and
     * MMIO uncore blocks enumerated by this device will be unavailable.
     */
    return -1;
}

#define __node_2_type(cur)  \
    rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
    struct intel_uncore_discovery_type *type_b = __node_2_type(b);
    const u16 *type_id = key;

    if (type_b->type > *type_id)
        return -1;
    else if (type_b->type < *type_id)
        return 1;

    return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
    struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

    return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
    return (__node_2_type(a)->type < __node_2_type(b)->type);
}

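/*
 * Allocate a discovery type for a not-yet-seen box type and insert it
 * into the discovery_tables rb-tree.
 */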
static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
    struct intel_uncore_discovery_type *type;

    if (unit->access_type >= UNCORE_ACCESS_MAX) {
        pr_warn("Unsupported access type %d\n", unit->access_type);
        return NULL;
    }

    type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
    if (!type)
        return NULL;

    type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
    if (!type->box_ctrl_die)
        goto free_type;

    type->access_type = unit->access_type;
    num_discovered_types[type->access_type]++;
    type->type = unit->box_type;

    rb_add(&type->node, &discovery_tables, __type_less);

    return type;

free_type:
    kfree(type);

    return NULL;
}

static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
    struct intel_uncore_discovery_type *type;

    type = search_uncore_discovery_type(unit->box_type);
    if (type)
        return type;

    return add_uncore_discovery_type(unit);
}

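/*
 * Record the box information carried by a unit discovery entry. On the
 * first pass (!parsed) the per-type data (control base, IDs, offsets) is
 * built up; on subsequent passes only the per-die box control address of
 * the first box is stored.
 */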
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
               int die, bool parsed)
{
    struct intel_uncore_discovery_type *type;
    unsigned int *box_offset, *ids;
    int i;

    if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset))
        return;

    if (parsed) {
        type = search_uncore_discovery_type(unit->box_type);
        if (WARN_ON_ONCE(!type))
            return;
        /* Store the first box of each die */
        if (!type->box_ctrl_die[die])
            type->box_ctrl_die[die] = unit->ctl;
        return;
    }

    type = get_uncore_discovery_type(unit);
    if (!type)
        return;

    box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
    if (!box_offset)
        return;

    ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
    if (!ids)
        goto free_box_offset;

    /* Store generic information for the first box */
    if (!type->num_boxes) {
        type->box_ctrl = unit->ctl;
        type->box_ctrl_die[die] = unit->ctl;
        type->num_counters = unit->num_regs;
        type->counter_width = unit->bit_width;
        type->ctl_offset = unit->ctl_offset;
        type->ctr_offset = unit->ctr_offset;
        *ids = unit->box_id;
        goto end;
    }

    for (i = 0; i < type->num_boxes; i++) {
        ids[i] = type->ids[i];
        box_offset[i] = type->box_offset[i];

        if (WARN_ON_ONCE(unit->box_id == ids[i]))
            goto free_ids;
    }
    ids[i] = unit->box_id;
    box_offset[i] = unit->ctl - type->box_ctrl;
    kfree(type->ids);
    kfree(type->box_offset);
end:
    type->ids = ids;
    type->box_offset = box_offset;
    type->num_boxes++;
    return;

free_ids:
    kfree(ids);

free_box_offset:
    kfree(box_offset);
}

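/*
 * Map the discovery table behind the given BAR and walk the global and
 * unit discovery state, handing every valid unit to uncore_insert_box_info().
 */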
static int parse_discovery_table(struct pci_dev *dev, int die,
                 u32 bar_offset, bool *parsed)
{
    struct uncore_global_discovery global;
    struct uncore_unit_discovery unit;
    void __iomem *io_addr;
    resource_size_t addr;
    unsigned long size;
    u32 val;
    int i;

    pci_read_config_dword(dev, bar_offset, &val);

    if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
        return -EINVAL;

    addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
    if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
        u32 val2;

        pci_read_config_dword(dev, bar_offset + 4, &val2);
        addr |= ((resource_size_t)val2) << 32;
    }
#endif
    size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
    io_addr = ioremap(addr, size);
    if (!io_addr)
        return -ENOMEM;

    /* Read Global Discovery State */
    memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
    if (uncore_discovery_invalid_unit(global)) {
        pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
            global.table1, global.ctl, global.table3);
        iounmap(io_addr);
        return -EINVAL;
    }
    iounmap(io_addr);

    size = (1 + global.max_units) * global.stride * 8;
    io_addr = ioremap(addr, size);
    if (!io_addr)
        return -ENOMEM;

    /* Parsing Unit Discovery State */
    for (i = 0; i < global.max_units; i++) {
        memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
                  sizeof(struct uncore_unit_discovery));

        if (uncore_discovery_invalid_unit(unit))
            continue;

        if (unit.access_type >= UNCORE_ACCESS_MAX)
            continue;

        uncore_insert_box_info(&unit, die, *parsed);
    }

    *parsed = true;
    iounmap(io_addr);
    return 0;
}

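/*
 * Probe the candidate PCI devices for uncore discovery tables and parse
 * any that are found. Returns false when no table could be parsed or a
 * malformed DVSEC entry is encountered.
 */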
bool intel_uncore_has_discovery_tables(void)
{
    u32 device, val, entry_id, bar_offset;
    int die, dvsec = 0, ret = true;
    struct pci_dev *dev = NULL;
    bool parsed = false;

    if (has_generic_discovery_table())
        device = UNCORE_DISCOVERY_TABLE_DEVICE;
    else
        device = PCI_ANY_ID;

    /*
     * Start a new search and iterate through the list of
     * the discovery table devices.
     */
    while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
        while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
            pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
            entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
            if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
                continue;

            pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

            if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
                ret = false;
                goto err;
            }
            bar_offset = UNCORE_DISCOVERY_BIR_BASE +
                     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

            die = get_device_die_id(dev);
            if (die < 0)
                continue;

            parse_discovery_table(dev, die, bar_offset, &parsed);
        }
    }

    /* None of the discovery tables are available */
    if (!parsed)
        ret = false;
err:
    pci_dev_put(dev);

    return ret;
}

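/* Free all per-type data collected while parsing the discovery tables. */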
void intel_uncore_clear_discovery_tables(void)
{
    struct intel_uncore_discovery_type *type, *next;

    rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
        kfree(type->box_ctrl_die);
        kfree(type);
    }
}

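/*
 * Generic PMON format attributes, shared by every uncore PMU that is
 * created from the discovery tables.
 */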
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh.attr,
    NULL,
};

static const struct attribute_group generic_uncore_format_group = {
    .name = "format",
    .attrs = generic_uncore_formats_attr,
};

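/* Generic PMON box operations for MSR type uncore units */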
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
    wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
    wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
    wrmsrl(uncore_msr_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
                        struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    wrmsrl(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
                         struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    wrmsrl(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
    .init_box       = intel_generic_uncore_msr_init_box,
    .disable_box    = intel_generic_uncore_msr_disable_box,
    .enable_box     = intel_generic_uncore_msr_enable_box,
    .disable_event  = intel_generic_uncore_msr_disable_event,
    .enable_event   = intel_generic_uncore_msr_enable_event,
    .read_counter   = uncore_msr_read_counter,
};

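/* Generic PMON box operations for PCI type uncore units */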
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;
    int box_ctl = uncore_pci_box_ctl(box);

    __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
    pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;
    int box_ctl = uncore_pci_box_ctl(box);

    pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;
    int box_ctl = uncore_pci_box_ctl(box);

    pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
                        struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
                        struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, hwc->config_base, 0);
}

u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
                      struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;
    u64 count = 0;

    pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
    pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

    return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
    .init_box       = intel_generic_uncore_pci_init_box,
    .disable_box    = intel_generic_uncore_pci_disable_box,
    .enable_box     = intel_generic_uncore_pci_enable_box,
    .disable_event  = intel_generic_uncore_pci_disable_event,
    .enable_event   = intel_generic_uncore_pci_enable_event,
    .read_counter   = intel_generic_uncore_pci_read_counter,
};

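/* Generic PMON box operations for MMIO type uncore units */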
#define UNCORE_GENERIC_MMIO_SIZE        0x4000

static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
    struct intel_uncore_type *type = box->pmu->type;

    if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
        return 0;

    return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
    u64 box_ctl = generic_uncore_mmio_box_ctl(box);
    struct intel_uncore_type *type = box->pmu->type;
    resource_size_t addr;

    if (!box_ctl) {
        pr_warn("Uncore type %d box %d: Invalid box control address.\n",
            type->type_id, type->box_ids[box->pmu->pmu_idx]);
        return;
    }

    addr = box_ctl;
    box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
    if (!box->io_addr) {
        pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
            type->type_id, type->box_ids[box->pmu->pmu_idx],
            (unsigned long long)addr);
        return;
    }

    writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
    if (!box->io_addr)
        return;

    writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
    if (!box->io_addr)
        return;

    writel(0, box->io_addr);
}

void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
                        struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    if (!box->io_addr)
        return;

    writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
                         struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    if (!box->io_addr)
        return;

    writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
    .init_box       = intel_generic_uncore_mmio_init_box,
    .exit_box       = uncore_mmio_exit_box,
    .disable_box    = intel_generic_uncore_mmio_disable_box,
    .enable_box     = intel_generic_uncore_mmio_enable_box,
    .disable_event  = intel_generic_uncore_mmio_disable_event,
    .enable_event   = intel_generic_uncore_mmio_enable_event,
    .read_counter   = uncore_mmio_read_counter,
};

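/*
 * Fill in an intel_uncore_type from the discovered per-type data and
 * hook up the matching MSR, PCI or MMIO generic ops.
 */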
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
                      struct intel_uncore_type *uncore,
                      struct intel_uncore_discovery_type *type)
{
    uncore->type_id = type->type;
    uncore->num_boxes = type->num_boxes;
    uncore->num_counters = type->num_counters;
    uncore->perf_ctr_bits = type->counter_width;
    uncore->box_ids = type->ids;

    switch (type_id) {
    case UNCORE_ACCESS_MSR:
        uncore->ops = &generic_uncore_msr_ops;
        uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
        uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
        uncore->box_ctl = (unsigned int)type->box_ctrl;
        uncore->msr_offsets = type->box_offset;
        break;
    case UNCORE_ACCESS_PCI:
        uncore->ops = &generic_uncore_pci_ops;
        uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
        uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
        uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
        uncore->box_ctls = type->box_ctrl_die;
        uncore->pci_offsets = type->box_offset;
        break;
    case UNCORE_ACCESS_MMIO:
        uncore->ops = &generic_uncore_mmio_ops;
        uncore->perf_ctr = (unsigned int)type->ctr_offset;
        uncore->event_ctl = (unsigned int)type->ctl_offset;
        uncore->box_ctl = (unsigned int)type->box_ctrl;
        uncore->box_ctls = type->box_ctrl_die;
        uncore->mmio_offsets = type->box_offset;
        uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
        break;
    default:
        return false;
    }

    return true;
}

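/*
 * Build the NULL-terminated array of generic uncore types for the given
 * access type. Space for num_extra additional entries is reserved at the
 * end of the array.
 */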
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
    struct intel_uncore_discovery_type *type;
    struct intel_uncore_type **uncores;
    struct intel_uncore_type *uncore;
    struct rb_node *node;
    int i = 0;

    uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
              sizeof(struct intel_uncore_type *), GFP_KERNEL);
    if (!uncores)
        return empty_uncore;

    for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
        type = rb_entry(node, struct intel_uncore_discovery_type, node);
        if (type->access_type != type_id)
            continue;

        uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
        if (!uncore)
            break;

        uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
        uncore->format_group = &generic_uncore_format_group;

        if (!uncore_update_uncore_type(type_id, uncore, type)) {
            kfree(uncore);
            continue;
        }
        uncores[i++] = uncore;
    }

    return uncores;
}

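/*
 * Populate the global MSR, PCI and MMIO uncore type lists from the
 * discovery tables.
 */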
void intel_uncore_generic_uncore_cpu_init(void)
{
    uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
    uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

    return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
    uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}