/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Performance event support - PowerPC classic/server specific definitions.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */

#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
#include <uapi/asm/perf_event.h>

/* Update perf_event_print_debug() if this changes */
#define MAX_HWEVENTS		8
#define MAX_EVENT_ALTERNATIVES	8
#define MAX_LIMITED_HWCOUNTERS	2

struct perf_event;

struct mmcr_regs {
	unsigned long mmcr0;
	unsigned long mmcr1;
	unsigned long mmcr2;
	unsigned long mmcra;
	unsigned long mmcr3;
};

/*
 * This struct provides the constants and functions needed to
 * describe the PMU on a particular POWER-family CPU.
 */
struct power_pmu {
	const char	*name;
	int		n_counter;
	int		max_alternatives;
	unsigned long	add_fields;
	unsigned long	test_adder;
	int		(*compute_mmcr)(u64 events[], int n_ev,
				unsigned int hwc[], struct mmcr_regs *mmcr,
				struct perf_event *pevents[], u32 flags);
	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
				unsigned long *valp, u64 event_config1);
	int		(*get_alternatives)(u64 event_id, unsigned int flags,
				u64 alt[]);
	void		(*get_mem_data_src)(union perf_mem_data_src *dsrc,
				u32 flags, struct pt_regs *regs);
	void		(*get_mem_weight)(u64 *weight, u64 type);
	unsigned long	group_constraint_mask;
	unsigned long	group_constraint_val;
	u64		(*bhrb_filter_map)(u64 branch_sample_type);
	void		(*config_bhrb)(u64 pmu_bhrb_filter);
	void		(*disable_pmc)(unsigned int pmc, struct mmcr_regs *mmcr);
	int		(*limited_pmc_event)(u64 event_id);
	u32		flags;
	const struct attribute_group	**attr_groups;
	int		n_generic;
	int		*generic_events;
	u64		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

	int		n_blacklist_ev;
	int		*blacklist_ev;
	/* BHRB entries in the PMU */
	int		bhrb_nr;
	/*
	 * Set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if
	 * the PMU supports the extended perf regs capability.
	 */
	int		capabilities;
	/*
	 * Function to check event code for values which are
	 * reserved. Function takes struct perf_event as input,
	 * since the event code could be spread in attr.config*
	 */
	int		(*check_attr_config)(struct perf_event *ev);
};

/*
 * Values for power_pmu.flags
 */
#define PPMU_LIMITED_PMC5_6	0x00000001 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR		0x00000002 /* uses alternate posn for SIPR/HV */
#define PPMU_NO_SIPR		0x00000004 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING	0x00000008 /* no continuous sampling */
#define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
#define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER		0x00000040 /* Has SIER */
#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
#define PPMU_NO_SIAR		0x00000100 /* Do not use SIAR */
#define PPMU_ARCH_31		0x00000200 /* Has MMCR3, SIER2 and SIER3 */
#define PPMU_P10_DD1		0x00000400 /* Is power10 DD1 processor version */
#define PPMU_HAS_ATTR_CONFIG1	0x00000800 /* Using config1 attribute */

/*
 * Values for flags to get_alternatives()
 */
#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */

int __init register_power_pmu(struct power_pmu *pmu);

struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long int read_bhrb(int n);

/*
 * Only override the default definitions in include/linux/perf_event.h
 * if we have hardware PMU support.
 */
#ifdef CONFIG_PPC_PERF_CTRS
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#endif

/*
 * The power_pmu.get_constraint function returns a 32/64-bit value and
 * a 32/64-bit mask that express the constraints between this event_id and
 * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
 * in MMCR* needs to be set to a specific value for this event_id. For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits. The constraint checking code will ensure that two events
 * that set the same field in their masks have the same value in their
 * value dwords.
 *
 * Add field: this expresses the constraint that there can be at most
 * N events in a particular class. A field of k bits can be used for
 * N <= 2^(k-1) - 1. The mask has the most significant bit of the field
 * set (and the other bits 0), and the value has only the least significant
 * bit of the field set. In addition, the 'add_fields' and 'test_adder'
 * in the struct power_pmu for this processor come into play. The
 * add_fields value contains 1 in the LSB of the field, and the
 * test_adder contains 2^(k-1) - 1 - N in the field.
 *
 * NAND field: this expresses the constraint that you may not have events
 * in all of a set of classes. (For example, on PPC970, you can't select
 * events from the FPU, ISU and IDU simultaneously, although any two are
 * possible.) For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits. The mask has
 * only the most-significant bit set, and the value has only the bit
 * for the event_id's class set. The test_adder has the least significant
 * bit set in the field.
 *
 * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */

extern ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page);

/*
 * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
 *
 * Having a suffix allows us to have aliases in sysfs - e.g. the generic
 * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
 * 'PM_CYC', where the latter is the name by which the event is known in
 * the POWER CPU specification.
 *
 * Similarly, some hardware and cache events use the same event code. E.g.
 * on POWER8, both "cache-references" and "L1-dcache-loads" events refer
 * to the same event, PM_LD_REF_L1. The suffix allows us to have two
 * sysfs objects for the same event and thus two entries/aliases in sysfs.
 */
#define	EVENT_VAR(_id, _suffix)		event_attr_##_id##_suffix
#define	EVENT_PTR(_id, _suffix)		&EVENT_VAR(_id, _suffix).attr.attr

#define	EVENT_ATTR(_name, _id, _suffix)					\
	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id,		\
			power_events_sysfs_show)

#define	GENERIC_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _g)
#define	GENERIC_EVENT_PTR(_id)		EVENT_PTR(_id, _g)

#define	CACHE_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _c)
#define	CACHE_EVENT_PTR(_id)		EVENT_PTR(_id, _c)

#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _p)
#define	POWER_EVENT_PTR(_id)		EVENT_PTR(_id, _p)
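The add-field and NAND-field encodings described in the comment above both work by letting a carry propagate into a field's most-significant bit when a constraint is violated. The standalone C sketch below illustrates that arithmetic under made-up assumptions: the field positions, the class names ("FPU", "ISU", "IDU"), N = 2 for the add field, and the helpers accumulate() and group_ok() are all invented for the example. It is not the kernel's scheduler; the real constraint checker lives in arch/powerpc/perf/core-book3s.c.

/*
 * Illustration only: one add field (bits 0-2, at most N = 2 events, k = 3)
 * and one NAND field (bits 4-7, three classes on bits 4, 5, 6).
 */
#include <stdio.h>

#define ADD_FIELD_MASK   0x4UL   /* MSB of the 3-bit add field            */
#define ADD_FIELD_VALUE  0x1UL   /* LSB of the field, one per event       */
#define ADD_FIELDS       0x1UL   /* analogue of power_pmu.add_fields      */
#define TEST_ADDER       0x1UL   /* 2^(k-1) - 1 - N = 3 - 2 = 1           */

#define NAND_FIELD_MASK  0x80UL  /* MSB of the 4-bit NAND field           */
#define NAND_TEST_ADDER  0x10UL  /* LSB of the NAND field                 */

/* Fold one event's constraint value into the running sum: OR in the bits,
 * but let add fields genuinely add so they count events in the class. */
static unsigned long accumulate(unsigned long sum, unsigned long value)
{
	return (sum | value) + (sum & value & ADD_FIELDS);
}

/* The group is schedulable iff adding the test adders does not disturb
 * any field's most-significant (mask) bit. */
static int group_ok(unsigned long sum, unsigned long mask)
{
	unsigned long adders = TEST_ADDER | NAND_TEST_ADDER;

	return (((sum + adders) ^ sum) & mask) == 0;
}

int main(void)
{
	unsigned long add_ev = ADD_FIELD_VALUE;	/* event of the add class */
	unsigned long fpu_ev = 0x10UL;		/* NAND class bit "FPU"    */
	unsigned long isu_ev = 0x20UL;		/* NAND class bit "ISU"    */
	unsigned long idu_ev = 0x40UL;		/* NAND class bit "IDU"    */
	unsigned long mask   = ADD_FIELD_MASK | NAND_FIELD_MASK;
	unsigned long sum;

	/* Two add-class events plus two of the three NAND classes: allowed. */
	sum = accumulate(0, add_ev);
	sum = accumulate(sum, add_ev);
	sum = accumulate(sum, fpu_ev);
	sum = accumulate(sum, isu_ev);
	printf("2 add-class + FPU + ISU: %s\n", group_ok(sum, mask) ? "ok" : "conflict");

	/* A third add-class event carries into the field's MSB: rejected. */
	sum = accumulate(sum, add_ev);
	printf("3 add-class events:      %s\n", group_ok(sum, mask) ? "ok" : "conflict");

	/* All three NAND classes: the test adder's carry reaches the MSB. */
	sum = accumulate(0, fpu_ev);
	sum = accumulate(sum, isu_ev);
	sum = accumulate(sum, idu_ev);
	printf("FPU + ISU + IDU:         %s\n", group_ok(sum, mask) ? "ok" : "conflict");
	return 0;
}

With k = 3 and N = 2, two events sum to 2 in the field; adding test_adder (1) gives 3, which still fits below the mask bit, so the group is accepted. A third event sums to 3, and 3 + 1 = 4 sets the mask bit, so the group is rejected. Likewise, setting all three NAND class bits (0x70) plus the NAND test adder (0x10) carries into the mask bit (0x80).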
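The EVENT_ATTR family of macros at the end of the header is what per-CPU PMU drivers use to expose the same event code under several sysfs names. The sketch below shows the intended shape of that usage; it is a hedged example, not a copy of a real driver. The numeric codes for PM_CYC and PM_LD_REF_L1 are placeholders, and the power8_events_attr / power8_pmu_events_group names are invented here; actual drivers such as arch/powerpc/perf/power8-pmu.c define their own event lists and attribute groups.

/* Placeholder event codes for the sake of the example. */
#define PM_CYC		0x0001e
#define PM_LD_REF_L1	0x100ee

/* 'cpu-cycles' (generic alias, _g suffix) and 'PM_CYC' (raw POWER name,
 * _p suffix) both describe the same event code. */
GENERIC_EVENT_ATTR(cpu-cycles,		PM_CYC);
POWER_EVENT_ATTR(PM_CYC,		PM_CYC);

/* The same code exported once more under its cache-event alias (_c suffix). */
CACHE_EVENT_ATTR(L1-dcache-loads,	PM_LD_REF_L1);

static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	POWER_EVENT_PTR(PM_CYC),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	NULL
};

static struct attribute_group power8_pmu_events_group = {
	.name	= "events",
	.attrs	= power8_events_attr,
};

The distinct _g/_c/_p suffixes matter because EVENT_VAR() pastes the suffix into the variable name: without them, exposing one event code under two sysfs names would try to define the same event_attr_* variable twice. The resulting group is then wired into struct power_pmu through its attr_groups pointer.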