// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hypervisor supplied "gpci" ("get performance counter info") performance
 * counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 */

#define pr_fmt(fmt) "hv-gpci: " fmt

#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-gpci.h"
#include "hv-common.h"

/*
 * Example usage:
 *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 *        secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */
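
/*
 * Illustrative sketch (not part of the upstream file): how the example
 * event string above ends up packed into perf_event_attr, given the
 * EVENT_DEFINE_RANGE_FORMAT() bit ranges declared below. The perf tool
 * does this encoding itself from the sysfs "format" directory; the
 * literal values here are only derived from those bit ranges for
 * illustration.
 *
 *	attr.config  = 0x10ULL			// request,         bits  0-31
 *		     | (0xffffffffULL << 32);	// starting_index,  bits 32-63
 *	attr.config1 = (0ULL << 0)		// secondary_index, bits  0-15
 *		     | (3ULL << 16)		// counter_info_version
 *		     | (8ULL << 24)		// length, in bytes
 *		     | (0ULL << 32);		// offset, in bytes
 */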

/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
/*
 * Note that starting_index, phys_processor_idx, sibling_part_id,
 * hw_chip_id, partition_id all refer to the same bit range. They
 * are basically aliases for the starting_index. The specific alias
 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);

/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);

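/*
 * The EVENT_DEFINE_RANGE_FORMAT() helpers are defined in hv-common.h. A
 * minimal sketch of what each use presumably provides, assuming the usual
 * PMU_FORMAT_ATTR() pattern (the real macro in hv-common.h is
 * authoritative):
 *
 *	// sysfs "format" entry, e.g. format/request reads "config:0-31"
 *	PMU_FORMAT_ATTR(request, "config:0-31");
 *	// accessor used below to pull the field back out of the event
 *	static u64 event_get_request(struct perf_event *event)
 *	{
 *		return (event->attr.config >> 0) & 0xffffffffULL;
 *	}
 *
 * The _LITE variants presumably add only the sysfs format alias for bits
 * 32-63, without a separate accessor, since only event_get_starting_index()
 * is used below for that bit range.
 */
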
static cpumask_t hv_gpci_cpumask;

static struct attribute *format_attrs[] = {
    &format_attr_request.attr,
    &format_attr_starting_index.attr,
    &format_attr_phys_processor_idx.attr,
    &format_attr_sibling_part_id.attr,
    &format_attr_hw_chip_id.attr,
    &format_attr_partition_id.attr,
    &format_attr_secondary_index.attr,
    &format_attr_counter_info_version.attr,

    &format_attr_offset.attr,
    &format_attr_length.attr,
    NULL,
};

static const struct attribute_group format_group = {
    .name = "format",
    .attrs = format_attrs,
};

static const struct attribute_group event_group = {
    .name  = "events",
    .attrs = hv_gpci_event_attrs,
};

#define HV_CAPS_ATTR(_name, _format)                \
static ssize_t _name##_show(struct device *dev,         \
                struct device_attribute *attr,  \
                char *page)             \
{                               \
    struct hv_perf_caps caps;               \
    unsigned long hret = hv_perf_caps_get(&caps);       \
    if (hret)                       \
        return -EIO;                    \
                                \
    return sprintf(page, _format, caps._name);      \
}                               \
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)
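
/*
 * For reference, HV_CAPS_ATTR(version, "0x%x\n") below expands to roughly
 * the following (sketch only, whitespace aside):
 *
 *	static ssize_t version_show(struct device *dev,
 *				    struct device_attribute *attr, char *page)
 *	{
 *		struct hv_perf_caps caps;
 *		unsigned long hret = hv_perf_caps_get(&caps);
 *		if (hret)
 *			return -EIO;
 *		return sprintf(page, "0x%x\n", caps.version);
 *	}
 *	static struct device_attribute hv_caps_attr_version = __ATTR_RO(version);
 *
 * i.e. each capability becomes a read-only sysfs file that re-queries the
 * hypervisor on every read.
 */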

static ssize_t kernel_version_show(struct device *dev,
                   struct device_attribute *attr,
                   char *page)
{
    return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}

static ssize_t cpumask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
    return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask);
}

static DEVICE_ATTR_RO(kernel_version);
static DEVICE_ATTR_RO(cpumask);

HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");

static struct attribute *interface_attrs[] = {
    &dev_attr_kernel_version.attr,
    &hv_caps_attr_version.attr,
    &hv_caps_attr_ga.attr,
    &hv_caps_attr_expanded.attr,
    &hv_caps_attr_lab.attr,
    &hv_caps_attr_collect_privileged.attr,
    NULL,
};

static struct attribute *cpumask_attrs[] = {
    &dev_attr_cpumask.attr,
    NULL,
};

static const struct attribute_group cpumask_attr_group = {
    .attrs = cpumask_attrs,
};

static const struct attribute_group interface_group = {
    .name = "interface",
    .attrs = interface_attrs,
};

static const struct attribute_group *attr_groups[] = {
    &format_group,
    &event_group,
    &interface_group,
    &cpumask_attr_group,
    NULL,
};
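
/*
 * Once the PMU is registered with these groups, the expected sysfs layout
 * (paths shown for illustration) is roughly:
 *
 *	/sys/bus/event_source/devices/hv_gpci/
 *		format/		request, starting_index, offset, length, ...
 *		events/		generated from hv_gpci_event_attrs
 *		interface/	kernel_version plus the HV_CAPS_ATTR files
 *		cpumask		CPU currently used to collect the counts
 */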

static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));

static unsigned long single_gpci_request(u32 req, u32 starting_index,
        u16 secondary_index, u8 version_in, u32 offset, u8 length,
        u64 *value)
{
    unsigned long ret;
    size_t i;
    u64 count;
    struct hv_gpci_request_buffer *arg;

    arg = (void *)get_cpu_var(hv_gpci_reqb);
    memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);

    arg->params.counter_request = cpu_to_be32(req);
    arg->params.starting_index = cpu_to_be32(starting_index);
    arg->params.secondary_index = cpu_to_be16(secondary_index);
    arg->params.counter_info_version_in = version_in;

    ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
            virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
    if (ret) {
        pr_devel("hcall failed: 0x%lx\n", ret);
        goto out;
    }

    /*
     * we verify offset and length are within the zeroed buffer at event
     * init.
     */
    count = 0;
    for (i = offset; i < offset + length; i++)
        count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);

    *value = count;
out:
    put_cpu_var(hv_gpci_reqb);
    return ret;
}
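
/*
 * The loop above assembles the requested bytes big-endian, matching the
 * byte order of the hypervisor-returned buffer. A worked example (values
 * hypothetical): with offset = 4 and length = 2,
 *
 *	count = ((u64)arg->bytes[4] << 8) | arg->bytes[5];
 *
 * and with offset = 0, length = 8 the result is equivalent to a
 * be64_to_cpu() load of the first eight data bytes.
 */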

static u64 h_gpci_get_value(struct perf_event *event)
{
    u64 count;
    unsigned long ret = single_gpci_request(event_get_request(event),
                    event_get_starting_index(event),
                    event_get_secondary_index(event),
                    event_get_counter_info_version(event),
                    event_get_offset(event),
                    event_get_length(event),
                    &count);
    if (ret)
        return 0;
    return count;
}

static void h_gpci_event_update(struct perf_event *event)
{
    s64 prev;
    u64 now = h_gpci_get_value(event);
    prev = local64_xchg(&event->hw.prev_count, now);
    local64_add(now - prev, &event->count);
}
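
/*
 * Note on the update above: prev_count caches the last raw value read
 * from the hypervisor, and only the delta (now - prev) is folded into
 * event->count. E.g. if the raw counter moves from 100 to 140 between
 * reads, 40 is added, so event->count accumulates correctly across
 * repeated perf reads.
 */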

static void h_gpci_event_start(struct perf_event *event, int flags)
{
    local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}

static void h_gpci_event_stop(struct perf_event *event, int flags)
{
    h_gpci_event_update(event);
}

static int h_gpci_event_add(struct perf_event *event, int flags)
{
    if (flags & PERF_EF_START)
        h_gpci_event_start(event, flags);

    return 0;
}

static int h_gpci_event_init(struct perf_event *event)
{
    u64 count;
    u8 length;

    /* Not our event */
    if (event->attr.type != event->pmu->type)
        return -ENOENT;

    /* config2 is unused */
    if (event->attr.config2) {
        pr_devel("config2 set when reserved\n");
        return -EINVAL;
    }

    /* no branch sampling */
    if (has_branch_stack(event))
        return -EOPNOTSUPP;

    length = event_get_length(event);
    if (length < 1 || length > 8) {
        pr_devel("length invalid\n");
        return -EINVAL;
    }

    /* last byte within the buffer? */
    if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
        pr_devel("request outside of buffer: %zu > %zu\n",
                (size_t)event_get_offset(event) + length,
                HGPCI_MAX_DATA_BYTES);
        return -EINVAL;
    }

    /* check if the request works... */
    if (single_gpci_request(event_get_request(event),
                event_get_starting_index(event),
                event_get_secondary_index(event),
                event_get_counter_info_version(event),
                event_get_offset(event),
                length,
                &count)) {
        pr_devel("gpci hcall failed\n");
        return -EINVAL;
    }

    return 0;
}
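
/*
 * Example of the bounds check in h_gpci_event_init() (numbers are
 * illustrative): offset=0x18, length=8 is accepted only if
 * 0x18 + 8 = 0x20 <= HGPCI_MAX_DATA_BYTES, so an event can never read past
 * the request buffer. The trailing trial call to single_gpci_request()
 * additionally rejects requests the hypervisor itself refuses, so failures
 * show up at event-open time rather than as silent zero counts.
 */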

static struct pmu h_gpci_pmu = {
    .task_ctx_nr = perf_invalid_context,

    .name = "hv_gpci",
    .attr_groups = attr_groups,
    .event_init  = h_gpci_event_init,
    .add         = h_gpci_event_add,
    .del         = h_gpci_event_stop,
    .start       = h_gpci_event_start,
    .stop        = h_gpci_event_stop,
    .read        = h_gpci_event_update,
    .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
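
/*
 * perf_invalid_context marks this as a system-wide (uncore-style) PMU:
 * events cannot be bound to a task, only opened per CPU, and tools
 * typically consult the cpumask file exported above to decide where to
 * open them. PERF_PMU_CAP_NO_EXCLUDE indicates the exclude_user/
 * exclude_kernel/... filters are not supported, so perf core rejects
 * events that set them.
 */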

static int ppc_hv_gpci_cpu_online(unsigned int cpu)
{
    if (cpumask_empty(&hv_gpci_cpumask))
        cpumask_set_cpu(cpu, &hv_gpci_cpumask);

    return 0;
}

static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
{
    int target;

    /* Check if exiting cpu is used for collecting gpci events */
    if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
        return 0;

    /* Find a new cpu to collect gpci events */
    target = cpumask_last(cpu_active_mask);

    if (target < 0 || target >= nr_cpu_ids) {
        pr_err("hv_gpci: CPU hotplug init failed\n");
        return -1;
    }

    /* Migrate gpci events to the new target */
    cpumask_set_cpu(target, &hv_gpci_cpumask);
    perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);

    return 0;
}
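
/*
 * Collection model sketch: hv_gpci_cpumask holds exactly one CPU, the one
 * userspace should open events on (exported via the "cpumask" sysfs file,
 * e.g. /sys/bus/event_source/devices/hv_gpci/cpumask). If that CPU goes
 * offline, the events are moved to another active CPU with
 * perf_pmu_migrate_context() and the mask is updated, so counting
 * continues transparently.
 */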

static int hv_gpci_cpu_hotplug_init(void)
{
    return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
              "perf/powerpc/hv_gcpi:online",
              ppc_hv_gpci_cpu_online,
              ppc_hv_gpci_cpu_offline);
}

static int hv_gpci_init(void)
{
    int r;
    unsigned long hret;
    struct hv_perf_caps caps;

    hv_gpci_assert_offsets_correct();

    if (!firmware_has_feature(FW_FEATURE_LPAR)) {
        pr_debug("not a virtualized system, not enabling\n");
        return -ENODEV;
    }

    hret = hv_perf_caps_get(&caps);
    if (hret) {
        pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                hret);
        return -ENODEV;
    }

    /* init cpuhotplug */
    r = hv_gpci_cpu_hotplug_init();
    if (r)
        return r;

    /* sampling not supported */
    h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

    r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
    if (r)
        return r;

    return 0;
}

device_initcall(hv_gpci_init);
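
/*
 * After the initcall runs on a capable LPAR, the PMU is visible as
 * "hv_gpci" under /sys/bus/event_source/devices/. Because
 * PERF_PMU_CAP_NO_INTERRUPT is set above, the PMU supports counting only:
 * "perf stat" invocations such as the example near the top of this file
 * work, while sampling with "perf record" is not supported.
 */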