/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN     32
#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT      0xff
#define UNCORE_PMC_IDX_MAX_GENERIC  8
#define UNCORE_PMC_IDX_MAX_FIXED    1
#define UNCORE_PMC_IDX_MAX_FREERUNNING  1
#define UNCORE_PMC_IDX_FIXED        UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING  (UNCORE_PMC_IDX_FIXED + \
                    UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX      (UNCORE_PMC_IDX_FREERUNNING + \
                    UNCORE_PMC_IDX_MAX_FREERUNNING)
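
/*
 * Illustrative note (derived from the definitions above): the generic
 * counters occupy indices 0..7, the fixed counter takes index 8
 * (UNCORE_PMC_IDX_FIXED), the free running counters start at index 9
 * (UNCORE_PMC_IDX_FREERUNNING), and UNCORE_PMC_IDX_MAX evaluates to 10.
 */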

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)  \
        ((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)  ((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)    ((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)   ((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)   ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)    (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV        0xff
#define UNCORE_EXTRA_PCI_DEV_MAX    4
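
/*
 * Worked example (illustrative values): UNCORE_PCI_DEV_FULL_DATA(0x1f, 1, 2, 0)
 * packs to 0x1f010200, from which UNCORE_PCI_DEV_DEV() recovers 0x1f,
 * UNCORE_PCI_DEV_FUNC() 1, UNCORE_PCI_DEV_TYPE() 2 and
 * UNCORE_PCI_DEV_IDX() 0.
 */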

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
    struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
    const char *name;
    int num_counters;
    int num_boxes;
    int perf_ctr_bits;
    int fixed_ctr_bits;
    int num_freerunning_types;
    int type_id;
    unsigned perf_ctr;
    unsigned event_ctl;
    unsigned event_mask;
    unsigned event_mask_ext;
    unsigned fixed_ctr;
    unsigned fixed_ctl;
    unsigned box_ctl;
    u64 *box_ctls;  /* Unit ctrl addr of the first box of each die */
    union {
        unsigned msr_offset;
        unsigned mmio_offset;
    };
    unsigned mmio_map_size;
    unsigned num_shared_regs:8;
    unsigned single_fixed:1;
    unsigned pair_ctr_ctl:1;
    union {
        unsigned *msr_offsets;
        unsigned *pci_offsets;
        unsigned *mmio_offsets;
    };
    unsigned *box_ids;
    struct event_constraint unconstrainted;
    struct event_constraint *constraints;
    struct intel_uncore_pmu *pmus;
    struct intel_uncore_ops *ops;
    struct uncore_event_desc *event_descs;
    struct freerunning_counters *freerunning;
    const struct attribute_group *attr_groups[4];
    const struct attribute_group **attr_update;
    struct pmu *pmu; /* for custom pmu ops */
    /*
     * The uncore PMU stores the relevant platform topology configuration
     * here, identifying which platform component each PMON block of this
     * type is supposed to monitor.
     */
    struct intel_uncore_topology *topology;
    /*
     * Optional callbacks that manage the mapping of uncore units to PMONs.
     */
    int (*get_topology)(struct intel_uncore_type *type);
    int (*set_mapping)(struct intel_uncore_type *type);
    void (*cleanup_mapping)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
    void (*init_box)(struct intel_uncore_box *);
    void (*exit_box)(struct intel_uncore_box *);
    void (*disable_box)(struct intel_uncore_box *);
    void (*enable_box)(struct intel_uncore_box *);
    void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
    void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
    u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
    int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
    struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
                           struct perf_event *);
    void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
    struct pmu          pmu;
    char                name[UNCORE_PMU_NAME_LEN];
    int             pmu_idx;
    int             func_id;
    bool                registered;
    atomic_t            activeboxes;
    struct intel_uncore_type    *type;
    struct intel_uncore_box     **boxes;
};

struct intel_uncore_extra_reg {
    raw_spinlock_t lock;
    u64 config, config1, config2;
    atomic_t ref;
};

struct intel_uncore_box {
    int dieid;  /* Logical die ID */
    int n_active;   /* number of active events */
    int n_events;
    int cpu;    /* cpu to collect events */
    unsigned long flags;
    atomic_t refcnt;
    struct perf_event *events[UNCORE_PMC_IDX_MAX];
    struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
    struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
    unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
    u64 tags[UNCORE_PMC_IDX_MAX];
    struct pci_dev *pci_dev;
    struct intel_uncore_pmu *pmu;
    u64 hrtimer_duration; /* hrtimer timeout for this box */
    struct hrtimer hrtimer;
    struct list_head list;
    struct list_head active_list;
    void __iomem *io_addr;
    struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0       0xf70
#define CFL_UNC_CBO_7_PER_CTR0          0xf76

#define UNCORE_BOX_FLAG_INITIATED       0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8       1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS  2

struct uncore_event_desc {
    struct device_attribute attr;
    const char *config;
};

struct freerunning_counters {
    unsigned int counter_base;
    unsigned int counter_offset;
    unsigned int box_offset;
    unsigned int num_counters;
    unsigned int bits;
    unsigned *box_offsets;
};

struct intel_uncore_topology {
    u64 configuration;
    int segment;
};

struct pci2phy_map {
    struct list_head list;
    int segment;
    int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);

ssize_t uncore_event_show(struct device *dev,
              struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
    return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)  container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n) container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n) to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()   (__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)         \
{                               \
    .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
    .config = _config,                  \
}
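
/*
 * Typical usage (a sketch; the event name and config string here are
 * illustrative): an entry such as
 * INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff") exposes a named event
 * whose config string is shown via uncore_event_show().
 */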

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)         \
static ssize_t __uncore_##_var##_show(struct device *dev,       \
                struct device_attribute *attr,      \
                char *page)             \
{                                   \
    BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);         \
    return sprintf(page, _format "\n");             \
}                                   \
static struct device_attribute format_attr_##_var =         \
    __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
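
/*
 * Typical usage (a sketch; the attribute name and format string here are
 * illustrative): DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7")
 * defines format_attr_event, a 0444 sysfs attribute whose show routine
 * prints "config:0-7".
 */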

static inline bool uncore_pmc_fixed(int idx)
{
    return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
    return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
                           unsigned long offset)
{
    if (offset < box->pmu->type->mmio_map_size)
        return true;

    pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
             offset, box->pmu->type->name);

    return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
    return box->pmu->type->box_ctl +
           box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
    return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
    return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
    return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
    if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
        return idx * 8 + box->pmu->type->event_ctl;

    return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
    return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
    struct intel_uncore_pmu *pmu = box->pmu;
    return pmu->type->msr_offsets ?
        pmu->type->msr_offsets[pmu->pmu_idx] :
        pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
    if (!box->pmu->type->box_ctl)
        return 0;
    return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
    if (!box->pmu->type->fixed_ctl)
        return 0;
    return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
    return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * The uncore documentation assigns no event-code to free running counters,
 * so synthetic events must be defined to select them. The events are
 * encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, the same as for
 * the fixed counters.
 *
 * The umask-code distinguishes a fixed counter from a free running
 * counter, and the different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, starting from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counter, starting from 1.
 *   Y stands for the index of the free running counter within that type,
 *   starting from 0.
 *
 * For example, there are three types of IIO free running counters on
 * Skylake server: IO CLOCKS counters, BANDWIDTH counters, and UTILIZATION
 * counters. The event-code for all of them is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-codes start from 0x10, so 'ioclk' is encoded as
 * event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH type. BANDWIDTH is
 * the second type, whose umask-codes start from 0x20, so 'bw_in_port2' is
 * encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
    return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START      0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
    return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
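
/*
 * Worked example (following the encoding described above): 'bw_in_port2'
 * is event=0xff,umask=0x22, i.e. config = 0x22ff. uncore_freerunning_idx()
 * yields 0x22 & 0xf = 2, and uncore_freerunning_type() yields
 * ((0x22 - 0x10) >> 4) & 0xf = 1, i.e. the third counter of the second
 * (BANDWIDTH) free running type.
 */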

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
                    struct perf_event *event)
{
    unsigned int type = uncore_freerunning_type(event->hw.config);
    unsigned int idx = uncore_freerunning_idx(event->hw.config);
    struct intel_uncore_pmu *pmu = box->pmu;

    return pmu->type->freerunning[type].counter_base +
           pmu->type->freerunning[type].counter_offset * idx +
           (pmu->type->freerunning[type].box_offsets ?
            pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
            pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
    if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
        return CFL_UNC_CBO_7_PERFEVTSEL0 +
               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
    } else {
        return box->pmu->type->event_ctl +
               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
               uncore_msr_box_offset(box);
    }
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
    if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
        return CFL_UNC_CBO_7_PER_CTR0 +
               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
    } else {
        return box->pmu->type->perf_ctr +
               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
               uncore_msr_box_offset(box);
    }
}
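
/*
 * Note (derived from the two helpers above): when pair_ctr_ctl is set,
 * the control and counter MSRs of a box interleave, so both
 * uncore_msr_event_ctl() and uncore_msr_perf_ctr() advance by two MSRs
 * per counter index instead of one.
 */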

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
    if (box->pci_dev || box->io_addr)
        return uncore_pci_fixed_ctl(box);
    else
        return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
    if (box->pci_dev || box->io_addr)
        return uncore_pci_fixed_ctr(box);
    else
        return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
    if (box->pci_dev || box->io_addr)
        return uncore_pci_event_ctl(box, idx);
    else
        return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
    if (box->pci_dev || box->io_addr)
        return uncore_pci_perf_ctr(box, idx);
    else
        return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
    return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
    return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
                     struct perf_event *event)
{
    unsigned int type = uncore_freerunning_type(event->hw.config);

    return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
                     struct perf_event *event)
{
    unsigned int type = uncore_freerunning_type(event->hw.config);

    return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
                           struct perf_event *event)
{
    return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
                         struct perf_event *event)
{
    unsigned int type = uncore_freerunning_type(event->hw.config);
    unsigned int idx = uncore_freerunning_idx(event->hw.config);

    return (type < uncore_num_freerunning_types(box, event)) &&
           (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
    return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
    u64 cfg = event->attr.config;

    return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
           (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
                           struct perf_event *event)
{
    if (is_freerunning_event(event))
        return 0;

    return -EINVAL;
}
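
/*
 * Illustrative check: config 0x10ff (event=0xff, umask=0x10, e.g. 'ioclk')
 * passes is_freerunning_event(), while 0x00ff (a fixed counter, umask
 * below UNCORE_FREERUNNING_UMASK_START) makes
 * uncore_freerunning_hw_config() return -EINVAL.
 */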

static inline void uncore_disable_event(struct intel_uncore_box *box,
                struct perf_event *event)
{
    box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
                struct perf_event *event)
{
    box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
                struct perf_event *event)
{
    return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
    if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
        if (box->pmu->type->ops->init_box)
            box->pmu->type->ops->init_box(box);
    }
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
    if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
        if (box->pmu->type->ops->exit_box)
            box->pmu->type->ops->exit_box(box);
    }
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
    return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
    return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
    return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
                 struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);