Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2019 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <linux/perf_event.h>
0025 #include <linux/init.h>
0026 #include "amdgpu.h"
0027 #include "amdgpu_pmu.h"
0028 
0029 #define PMU_NAME_SIZE 32
0030 #define NUM_FORMATS_AMDGPU_PMU      4
0031 #define NUM_FORMATS_DF_VEGA20       3
0032 #define NUM_EVENTS_DF_VEGA20        8
0033 #define NUM_EVENT_TYPES_VEGA20      1
0034 #define NUM_EVENTS_VEGA20_XGMI      2
0035 #define NUM_EVENTS_VEGA20_MAX       NUM_EVENTS_VEGA20_XGMI
0036 #define NUM_EVENT_TYPES_ARCTURUS    1
0037 #define NUM_EVENTS_ARCTURUS_XGMI    6
0038 #define NUM_EVENTS_ARCTURUS_MAX     NUM_EVENTS_ARCTURUS_XGMI
0039 
/*
 * Sysfs attribute wrapper for a single PMU event/format entry: carries the
 * config string shown to userspace plus an optional event-config type that
 * amdgpu_pmu_event_show() appends as ",type=0x%x".
 */
struct amdgpu_pmu_event_attribute {
	struct device_attribute attr;
	const char *event_str;	/* e.g. "event=0x7,instance=0x46,umask=0x2" */
	unsigned int type;	/* AMDGPU_PMU_EVENT_CONFIG_TYPE_*; 0 = none */
};
0045 
/* record to keep track of pmu entry per pmu type per device */
struct amdgpu_pmu_entry {
	struct list_head entry;			/* node in amdgpu_pmu_list */
	struct amdgpu_device *adev;		/* owning device */
	struct pmu pmu;				/* registered perf PMU */
	unsigned int pmu_perf_type;		/* AMDGPU_PMU_PERF_TYPE_* */
	char *pmu_type_name;			/* human-readable name for logs */
	char *pmu_file_prefix;			/* sysfs name prefix, e.g. "amdgpu_df" */
	struct attribute_group fmt_attr_group;	/* "format" sysfs group */
	struct amdgpu_pmu_event_attribute *fmt_attr;	/* backing array for formats */
	struct attribute_group evt_attr_group;	/* "events" sysfs group */
	struct amdgpu_pmu_event_attribute *evt_attr;	/* backing array for events */
};
0059 
0060 static ssize_t amdgpu_pmu_event_show(struct device *dev,
0061                 struct device_attribute *attr, char *buf)
0062 {
0063     struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr;
0064 
0065     amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute,
0066                                     attr);
0067 
0068     if (!amdgpu_pmu_attr->type)
0069         return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str);
0070 
0071     return sprintf(buf, "%s,type=0x%x\n",
0072             amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type);
0073 }
0074 
/* global list of per-device PMU entries; entries are removed in amdgpu_pmu_fini() */
static LIST_HEAD(amdgpu_pmu_list);
0076 
0077 
/* a single named format or event and its config string */
struct amdgpu_pmu_attr {
	const char *name;
	const char *config;
};

/* one event-config type and how many consecutive events belong to it */
struct amdgpu_pmu_type {
	const unsigned int type;	/* AMDGPU_PMU_EVENT_CONFIG_TYPE_* */
	const unsigned int num_of_type;	/* event count for this type */
};

/* per-ASIC bundle of format/event/type tables consumed at PMU registration */
struct amdgpu_pmu_config {
	struct amdgpu_pmu_attr *formats;
	unsigned int num_formats;
	struct amdgpu_pmu_attr *events;
	unsigned int num_events;
	struct amdgpu_pmu_type *types;	/* NULL for PMU-typed configs */
	unsigned int num_types;
};
0096 
0097 /*
0098  * Events fall under two categories:
0099  *  - PMU typed
0100  *    Events in /sys/bus/event_source/devices/amdgpu_<pmu_type>_<dev_num> have
0101  *    performance counter operations handled by one IP <pmu_type>.  Formats and
0102  *    events should be defined by <pmu_type>_<asic_type>_formats and
0103  *    <pmu_type>_<asic_type>_events respectively.
0104  *
0105  *  - Event config typed
0106  *    Events in /sys/bus/event_source/devices/amdgpu_<dev_num> have performance
0107  *    counter operations that can be handled by multiple IPs dictated by their
0108  *    "type" format field.  Formats and events should be defined by
0109  *    amdgpu_pmu_formats and <asic_type>_events respectively.  Format field
0110  *    "type" is generated in amdgpu_pmu_event_show and defined in
0111  *    <asic_type>_event_config_types.
0112  */
0113 
/* formats for event-config-typed PMUs; "type" selects the handling IP */
static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = {
	{ .name = "event", .config = "config:0-7" },
	{ .name = "instance", .config = "config:8-15" },
	{ .name = "umask", .config = "config:16-23"},
	{ .name = "type", .config = "config:56-63"}
};
0120 
/* Vega20 events */
static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = {
	{ .name = "xgmi_link0_data_outbound",
			.config = "event=0x7,instance=0x46,umask=0x2" },
	{ .name = "xgmi_link1_data_outbound",
			.config = "event=0x7,instance=0x47,umask=0x2" }
};

/* all Vega20 events above belong to the XGMI event-config type */
static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = {
	{ .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
					.num_of_type = NUM_EVENTS_VEGA20_XGMI }
};

/* event-config-typed PMU config for Vega20 (shares the generic formats) */
static struct amdgpu_pmu_config vega20_config = {
	.formats = amdgpu_pmu_formats,
	.num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
	.events = vega20_events,
	.num_events = ARRAY_SIZE(vega20_events),
	.types = vega20_types,
	.num_types = ARRAY_SIZE(vega20_types)
};
0142 
/* Vega20 data fabric (DF) events */
static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = {
	{ .name = "event", .config = "config:0-7" },
	{ .name = "instance", .config = "config:8-15" },
	{ .name = "umask", .config = "config:16-23"}
};

static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = {
	{ .name = "cake0_pcsout_txdata",
			.config = "event=0x7,instance=0x46,umask=0x2" },
	{ .name = "cake1_pcsout_txdata",
			.config = "event=0x7,instance=0x47,umask=0x2" },
	{ .name = "cake0_pcsout_txmeta",
			.config = "event=0x7,instance=0x46,umask=0x4" },
	{ .name = "cake1_pcsout_txmeta",
			.config = "event=0x7,instance=0x47,umask=0x4" },
	{ .name = "cake0_ftiinstat_reqalloc",
			.config = "event=0xb,instance=0x46,umask=0x4" },
	{ .name = "cake1_ftiinstat_reqalloc",
			.config = "event=0xb,instance=0x47,umask=0x4" },
	{ .name = "cake0_ftiinstat_rspalloc",
			.config = "event=0xb,instance=0x46,umask=0x8" },
	{ .name = "cake1_ftiinstat_rspalloc",
			.config = "event=0xb,instance=0x47,umask=0x8" }
};

/* PMU-typed config: no event-config types, so .types is NULL */
static struct amdgpu_pmu_config df_vega20_config = {
	.formats = df_vega20_formats,
	.num_formats = ARRAY_SIZE(df_vega20_formats),
	.events = df_vega20_events,
	.num_events = ARRAY_SIZE(df_vega20_events),
	.types = NULL,
	.num_types = 0
};
0177 
/* Arcturus events */
static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = {
	{ .name = "xgmi_link0_data_outbound",
			.config = "event=0x7,instance=0x4b,umask=0x2" },
	{ .name = "xgmi_link1_data_outbound",
			.config = "event=0x7,instance=0x4c,umask=0x2" },
	{ .name = "xgmi_link2_data_outbound",
			.config = "event=0x7,instance=0x4d,umask=0x2" },
	{ .name = "xgmi_link3_data_outbound",
			.config = "event=0x7,instance=0x4e,umask=0x2" },
	{ .name = "xgmi_link4_data_outbound",
			.config = "event=0x7,instance=0x4f,umask=0x2" },
	{ .name = "xgmi_link5_data_outbound",
			.config = "event=0x7,instance=0x50,umask=0x2" }
};

/* all Arcturus events above belong to the XGMI event-config type */
static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = {
	{ .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
				.num_of_type = NUM_EVENTS_ARCTURUS_XGMI }
};

/* event-config-typed PMU config for Arcturus (shares the generic formats) */
static struct amdgpu_pmu_config arcturus_config = {
	.formats = amdgpu_pmu_formats,
	.num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
	.events = arcturus_events,
	.num_events = ARRAY_SIZE(arcturus_events),
	.types = arcturus_types,
	.num_types = ARRAY_SIZE(arcturus_types)
};
0207 
0208 /* initialize perf counter */
0209 static int amdgpu_perf_event_init(struct perf_event *event)
0210 {
0211     struct hw_perf_event *hwc = &event->hw;
0212 
0213     /* test the event attr type check for PMU enumeration */
0214     if (event->attr.type != event->pmu->type)
0215         return -ENOENT;
0216 
0217     /* update the hw_perf_event struct with config data */
0218     hwc->config = event->attr.config;
0219     hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE;
0220 
0221     return 0;
0222 }
0223 
/* start perf counter */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	int target_cntr = 0;

	/* must be stopped before it can be (re)started */
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	/* nothing to do if the DF counter ops are missing on this device */
	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_start))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		/*
		 * Without PERF_EF_RELOAD no hardware counter has been claimed
		 * yet: call pmc_start() with is_add=1 to allocate a slot and
		 * remember its index in hwc->idx.
		 */
		if (!(flags & PERF_EF_RELOAD)) {
			target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
					hwc->config, 0 /* unused */,
					1 /* add counter */);
			if (target_cntr < 0)
				break;

			hwc->idx = target_cntr;
		}

		/* is_add=0: start counting on the claimed slot */
		pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,
						hwc->idx, 0);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}
0265 
/* read perf counter */
static void amdgpu_perf_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	u64 count, prev;

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_get_count))
		return;

	/*
	 * Read the hardware count and publish it via prev_count; the
	 * cmpxchg loop retries if a concurrent reader updated prev_count
	 * between our read and our store.
	 */
	do {
		prev = local64_read(&hwc->prev_count);

		switch (hwc->config_base) {
		case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
		case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
			pe->adev->df.funcs->pmc_get_count(pe->adev,
					hwc->config, hwc->idx, &count);
			break;
		default:
			/* unknown config type: contribute no delta */
			count = 0;
			break;
		}
	} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

	/* accumulate only the delta since the last read */
	local64_add(count - prev, &event->count);
}
0296 
/* stop perf counter */
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	/* already stopped and counts folded in: nothing to do */
	if (hwc->state & PERF_HES_UPTODATE)
		return;

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_stop))
		return;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		/* is_remove=0: pause counting but keep the counter slot */
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
						0);
		break;
	default:
		break;
	}

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	/*
	 * NOTE(review): this check is unreachable — the early return above
	 * already guaranteed PERF_HES_UPTODATE is clear and nothing in
	 * between sets it.  Kept as-is to preserve upstream behavior.
	 */
	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/* fold the final hardware count into event->count */
	amdgpu_perf_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}
0331 
/* add perf counter */
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int retval = 0, target_cntr;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_start))
		return -EINVAL;

	/*
	 * Resolve the event-config type: a DF-typed PMU handles DF events
	 * only, while the "ALL" PMU extracts the type from config bits
	 * 56-63 (the "type" format field).
	 */
	switch (pe->pmu_perf_type) {
	case AMDGPU_PMU_PERF_TYPE_DF:
		hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
		break;
	case AMDGPU_PMU_PERF_TYPE_ALL:
		hwc->config_base = (hwc->config >>
					AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) &
					AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK;
		break;
	}

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		/* is_add=1: claim a hardware counter slot for this event */
		target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
					hwc->config, 0 /* unused */,
					1 /* add counter */);
		if (target_cntr < 0)
			retval = target_cntr;
		else
			hwc->idx = target_cntr;

		break;
	default:
		/* unknown type: accept but never program hardware */
		return 0;
	}

	if (retval)
		return retval;

	/* PERF_EF_RELOAD tells start() the slot is already claimed */
	if (flags & PERF_EF_START)
		amdgpu_perf_start(event, PERF_EF_RELOAD);

	return retval;
}
0382 
/* delete perf counter  */
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_stop))
		return;

	/* fold in the final count before releasing the counter */
	amdgpu_perf_stop(event, PERF_EF_UPDATE);

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		/* is_remove=1: release the hardware counter slot */
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
						1);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}
0408 
0409 static void amdgpu_pmu_create_event_attrs_by_type(
0410                 struct attribute_group *attr_group,
0411                 struct amdgpu_pmu_event_attribute *pmu_attr,
0412                 struct amdgpu_pmu_attr events[],
0413                 int s_offset,
0414                 int e_offset,
0415                 unsigned int type)
0416 {
0417     int i;
0418 
0419     pmu_attr += s_offset;
0420 
0421     for (i = s_offset; i < e_offset; i++) {
0422         attr_group->attrs[i] = &pmu_attr->attr.attr;
0423         sysfs_attr_init(&pmu_attr->attr.attr);
0424         pmu_attr->attr.attr.name = events[i].name;
0425         pmu_attr->attr.attr.mode = 0444;
0426         pmu_attr->attr.show = amdgpu_pmu_event_show;
0427         pmu_attr->event_str = events[i].config;
0428         pmu_attr->type = type;
0429         pmu_attr++;
0430     }
0431 }
0432 
/* convenience wrapper: create all attrs with no event-config type tag */
static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group,
				struct amdgpu_pmu_event_attribute *pmu_attr,
				struct amdgpu_pmu_attr events[],
				int num_events)
{
	amdgpu_pmu_create_event_attrs_by_type(attr_group, pmu_attr, events, 0,
				num_events, AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE);
}
0441 
0442 
/*
 * Allocate the format/event attribute arrays and their NULL-terminated
 * sysfs pointer tables (hence the "+ 1" on each table size).  On failure
 * the goto chain unwinds every allocation made so far; each label is
 * named after the allocation that failed and falls through to free the
 * earlier ones.  Returns 0 on success, -ENOMEM otherwise.
 */
static int amdgpu_pmu_alloc_pmu_attrs(
				struct attribute_group *fmt_attr_group,
				struct amdgpu_pmu_event_attribute **fmt_attr,
				struct attribute_group *evt_attr_group,
				struct amdgpu_pmu_event_attribute **evt_attr,
				struct amdgpu_pmu_config *config)
{
	*fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr),
							GFP_KERNEL);

	if (!(*fmt_attr))
		return -ENOMEM;

	fmt_attr_group->attrs = kcalloc(config->num_formats + 1,
				sizeof(*fmt_attr_group->attrs), GFP_KERNEL);

	if (!fmt_attr_group->attrs)
		goto err_fmt_attr_grp;

	*evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL);

	if (!(*evt_attr))
		goto err_evt_attr;

	evt_attr_group->attrs = kcalloc(config->num_events + 1,
				sizeof(*evt_attr_group->attrs), GFP_KERNEL);

	if (!evt_attr_group->attrs)
		goto err_evt_attr_grp;

	return 0;
err_evt_attr_grp:
	kfree(*evt_attr);
err_evt_attr:
	kfree(fmt_attr_group->attrs);
err_fmt_attr_grp:
	kfree(*fmt_attr);
	return -ENOMEM;
}
0482 
/* init pmu tracking per pmu type */
static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
			struct amdgpu_pmu_config *config)
{
	/* groups exposed under /sys/bus/event_source/devices/<pmu_name>/ */
	const struct attribute_group *attr_groups[] = {
		&pmu_entry->fmt_attr_group,
		&pmu_entry->evt_attr_group,
		NULL
	};
	char pmu_name[PMU_NAME_SIZE];
	int ret = 0, total_num_events = 0;

	/* counter operations are serviced by the amdgpu_perf_* callbacks */
	pmu_entry->pmu = (struct pmu){
		.event_init = amdgpu_perf_event_init,
		.add = amdgpu_perf_add,
		.del = amdgpu_perf_del,
		.start = amdgpu_perf_start,
		.stop = amdgpu_perf_stop,
		.read = amdgpu_perf_read,
		.task_ctx_nr = perf_invalid_context,
	};

	ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group,
					&pmu_entry->fmt_attr,
					&pmu_entry->evt_attr_group,
					&pmu_entry->evt_attr,
					config);

	if (ret)
		goto err_out;

	amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr,
					config->formats, config->num_formats);

	if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) {
		int i;

		/*
		 * Event-config-typed PMU: each consecutive run of events
		 * gets tagged with its type so the sysfs "type" field is
		 * generated in amdgpu_pmu_event_show().
		 */
		for (i = 0; i < config->num_types; i++) {
			amdgpu_pmu_create_event_attrs_by_type(
					&pmu_entry->evt_attr_group,
					pmu_entry->evt_attr,
					config->events,
					total_num_events,
					total_num_events +
						config->types[i].num_of_type,
					config->types[i].type);
			total_num_events += config->types[i].num_of_type;
		}
	} else {
		/* PMU-typed: all events, no type tag */
		amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group,
					pmu_entry->evt_attr,
					config->events, config->num_events);
		total_num_events = config->num_events;
	}

	/* struct pmu keeps a pointer to the groups, so duplicate the array */
	pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
							GFP_KERNEL);

	if (!pmu_entry->pmu.attr_groups) {
		ret = -ENOMEM;
		goto err_attr_group;
	}

	/* e.g. "amdgpu_df_1": prefix plus the DRM primary minor index */
	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
				adev_to_drm(pmu_entry->adev)->primary->index);

	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);

	if (ret)
		goto err_register;

	if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL)
		pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
				pmu_entry->pmu_type_name, total_num_events);
	else
		pr_info("Detected AMDGPU %d Perf Events.\n", total_num_events);


	/* track the entry so amdgpu_pmu_fini() can tear it down */
	list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

	return 0;
err_register:
	kfree(pmu_entry->pmu.attr_groups);
err_attr_group:
	kfree(pmu_entry->fmt_attr_group.attrs);
	kfree(pmu_entry->fmt_attr);
	kfree(pmu_entry->evt_attr_group.attrs);
	kfree(pmu_entry->evt_attr);
err_out:
	pr_warn("Error initializing AMDGPU %s PMUs.\n",
						pmu_entry->pmu_type_name);
	return ret;
}
0576 
0577 /* destroy all pmu data associated with target device */
0578 void amdgpu_pmu_fini(struct amdgpu_device *adev)
0579 {
0580     struct amdgpu_pmu_entry *pe, *temp;
0581 
0582     list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
0583         if (pe->adev != adev)
0584             continue;
0585         list_del(&pe->entry);
0586         perf_pmu_unregister(&pe->pmu);
0587         kfree(pe->pmu.attr_groups);
0588         kfree(pe->fmt_attr_group.attrs);
0589         kfree(pe->fmt_attr);
0590         kfree(pe->evt_attr_group.attrs);
0591         kfree(pe->evt_attr);
0592         kfree(pe);
0593     }
0594 }
0595 
0596 static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev,
0597                         unsigned int pmu_type,
0598                         char *pmu_type_name,
0599                         char *pmu_file_prefix)
0600 {
0601     struct amdgpu_pmu_entry *pmu_entry;
0602 
0603     pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
0604 
0605     if (!pmu_entry)
0606         return pmu_entry;
0607 
0608     pmu_entry->adev = adev;
0609     pmu_entry->fmt_attr_group.name = "format";
0610     pmu_entry->fmt_attr_group.attrs = NULL;
0611     pmu_entry->evt_attr_group.name = "events";
0612     pmu_entry->evt_attr_group.attrs = NULL;
0613     pmu_entry->pmu_perf_type = pmu_type;
0614     pmu_entry->pmu_type_name = pmu_type_name;
0615     pmu_entry->pmu_file_prefix = pmu_file_prefix;
0616 
0617     return pmu_entry;
0618 }
0619 
0620 /* init amdgpu_pmu */
0621 int amdgpu_pmu_init(struct amdgpu_device *adev)
0622 {
0623     int ret = 0;
0624     struct amdgpu_pmu_entry *pmu_entry, *pmu_entry_df;
0625 
0626     switch (adev->asic_type) {
0627     case CHIP_VEGA20:
0628         pmu_entry_df = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF,
0629                         "DF", "amdgpu_df");
0630 
0631         if (!pmu_entry_df)
0632             return -ENOMEM;
0633 
0634         ret = init_pmu_entry_by_type_and_add(pmu_entry_df,
0635                             &df_vega20_config);
0636 
0637         if (ret) {
0638             kfree(pmu_entry_df);
0639             return ret;
0640         }
0641 
0642         pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
0643                         "", "amdgpu");
0644 
0645         if (!pmu_entry) {
0646             amdgpu_pmu_fini(adev);
0647             return -ENOMEM;
0648         }
0649 
0650         ret = init_pmu_entry_by_type_and_add(pmu_entry,
0651                             &vega20_config);
0652 
0653         if (ret) {
0654             kfree(pmu_entry);
0655             amdgpu_pmu_fini(adev);
0656             return ret;
0657         }
0658 
0659         break;
0660     case CHIP_ARCTURUS:
0661         pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
0662                         "", "amdgpu");
0663         if (!pmu_entry)
0664             return -ENOMEM;
0665 
0666         ret = init_pmu_entry_by_type_and_add(pmu_entry,
0667                             &arcturus_config);
0668 
0669         if (ret) {
0670             kfree(pmu_entry);
0671             return -ENOMEM;
0672         }
0673 
0674         break;
0675 
0676     default:
0677         return 0;
0678     }
0679 
0680     return ret;
0681 }