#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/hash.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
#include <linux/dmi.h>
#include <linux/atomic.h>

#include "kfd_priv.h"
#include "kfd_crat.h"
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
#include "amdgpu.h"

static struct list_head topology_device_list;
static struct kfd_system_properties sys_props;

static DECLARE_RWSEM(topology_lock);
static uint32_t topology_crat_proximity_domain;

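/* Caller must hold topology_lock */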
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
		uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev;
	struct kfd_topology_device *device = NULL;

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->proximity_domain == proximity_domain) {
			device = top_dev;
			break;
		}

	return device;
}

struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
		uint32_t proximity_domain)
{
	struct kfd_topology_device *device = NULL;

	down_read(&topology_lock);

	device = kfd_topology_device_by_proximity_domain_no_lock(
			proximity_domain);
	up_read(&topology_lock);

	return device;
}

struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
{
	struct kfd_topology_device *top_dev = NULL;
	struct kfd_topology_device *ret = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->gpu_id == gpu_id) {
			ret = top_dev;
			break;
		}

	up_read(&topology_lock);

	return ret;
}

struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
{
	struct kfd_topology_device *top_dev;

	top_dev = kfd_topology_device_by_id(gpu_id);
	if (!top_dev)
		return NULL;

	return top_dev->gpu;
}

struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
	struct kfd_topology_device *top_dev;
	struct kfd_dev *device = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
			device = top_dev->gpu;
			break;
		}

	up_read(&topology_lock);

	return device;
}

struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev)
{
	struct kfd_topology_device *top_dev;
	struct kfd_dev *device = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->gpu && top_dev->gpu->adev == adev) {
			device = top_dev->gpu;
			break;
		}

	up_read(&topology_lock);

	return device;
}

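/* Unlink the topology device from its list and free all of its
 * memory, cache, io link, p2p link and perf property lists.
 */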
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *mem;
	struct kfd_cache_properties *cache;
	struct kfd_iolink_properties *iolink;
	struct kfd_iolink_properties *p2plink;
	struct kfd_perf_properties *perf;

	list_del(&dev->list);

	while (dev->mem_props.next != &dev->mem_props) {
		mem = container_of(dev->mem_props.next,
				struct kfd_mem_properties, list);
		list_del(&mem->list);
		kfree(mem);
	}

	while (dev->cache_props.next != &dev->cache_props) {
		cache = container_of(dev->cache_props.next,
				struct kfd_cache_properties, list);
		list_del(&cache->list);
		kfree(cache);
	}

	while (dev->io_link_props.next != &dev->io_link_props) {
		iolink = container_of(dev->io_link_props.next,
				struct kfd_iolink_properties, list);
		list_del(&iolink->list);
		kfree(iolink);
	}

	while (dev->p2p_link_props.next != &dev->p2p_link_props) {
		p2plink = container_of(dev->p2p_link_props.next,
				struct kfd_iolink_properties, list);
		list_del(&p2plink->list);
		kfree(p2plink);
	}

	while (dev->perf_props.next != &dev->perf_props) {
		perf = container_of(dev->perf_props.next,
				struct kfd_perf_properties, list);
		list_del(&perf->list);
		kfree(perf);
	}

	kfree(dev);
}

void kfd_release_topology_device_list(struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	while (!list_empty(device_list)) {
		dev = list_first_entry(device_list,
				struct kfd_topology_device, list);
		kfd_release_topology_device(dev);
	}
}

static void kfd_release_live_view(void)
{
	kfd_release_topology_device_list(&topology_device_list);
	memset(&sys_props, 0, sizeof(sys_props));
}

struct kfd_topology_device *kfd_create_topology_device(
		struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	dev = kfd_alloc_struct(dev);
	if (!dev) {
		pr_err("No memory to allocate a topology device\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dev->mem_props);
	INIT_LIST_HEAD(&dev->cache_props);
	INIT_LIST_HEAD(&dev->io_link_props);
	INIT_LIST_HEAD(&dev->p2p_link_props);
	INIT_LIST_HEAD(&dev->perf_props);

	list_add_tail(&dev->list, device_list);

	return dev;
}

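/* Helpers appending one "name value" line (or a bare value) to a sysfs
 * show buffer, advancing offs and bounding every write by PAGE_SIZE.
 */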
#define sysfs_show_gen_prop(buffer, offs, fmt, ...)		\
	(offs += snprintf(buffer+offs, PAGE_SIZE-offs,		\
			  fmt, __VA_ARGS__))
#define sysfs_show_32bit_prop(buffer, offs, name, value) \
		sysfs_show_gen_prop(buffer, offs, "%s %u\n", name, value)
#define sysfs_show_64bit_prop(buffer, offs, name, value) \
		sysfs_show_gen_prop(buffer, offs, "%s %llu\n", name, value)
#define sysfs_show_32bit_val(buffer, offs, value) \
		sysfs_show_gen_prop(buffer, offs, "%u\n", value)
#define sysfs_show_str_val(buffer, offs, value) \
		sysfs_show_gen_prop(buffer, offs, "%s\n", value)

static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	int offs = 0;

	buffer[0] = 0;

	if (attr == &sys_props.attr_genid) {
		sysfs_show_32bit_val(buffer, offs,
				     sys_props.generation_count);
	} else if (attr == &sys_props.attr_props) {
		sysfs_show_64bit_prop(buffer, offs, "platform_oem",
				      sys_props.platform_oem);
		sysfs_show_64bit_prop(buffer, offs, "platform_id",
				      sys_props.platform_id);
		sysfs_show_64bit_prop(buffer, offs, "platform_rev",
				      sys_props.platform_rev);
	} else {
		offs = -EINVAL;
	}

	return offs;
}

static void kfd_topology_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops sysprops_ops = {
	.show = sysprops_show,
};

static struct kobj_type sysprops_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &sysprops_ops,
};

static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
			   char *buffer)
{
	int offs = 0;
	struct kfd_iolink_properties *iolink;

	buffer[0] = 0;

	iolink = container_of(attr, struct kfd_iolink_properties, attr);
	if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
		return -EPERM;
	sysfs_show_32bit_prop(buffer, offs, "type", iolink->iolink_type);
	sysfs_show_32bit_prop(buffer, offs, "version_major", iolink->ver_maj);
	sysfs_show_32bit_prop(buffer, offs, "version_minor", iolink->ver_min);
	sysfs_show_32bit_prop(buffer, offs, "node_from", iolink->node_from);
	sysfs_show_32bit_prop(buffer, offs, "node_to", iolink->node_to);
	sysfs_show_32bit_prop(buffer, offs, "weight", iolink->weight);
	sysfs_show_32bit_prop(buffer, offs, "min_latency", iolink->min_latency);
	sysfs_show_32bit_prop(buffer, offs, "max_latency", iolink->max_latency);
	sysfs_show_32bit_prop(buffer, offs, "min_bandwidth",
			      iolink->min_bandwidth);
	sysfs_show_32bit_prop(buffer, offs, "max_bandwidth",
			      iolink->max_bandwidth);
	sysfs_show_32bit_prop(buffer, offs, "recommended_transfer_size",
			      iolink->rec_transfer_size);
	sysfs_show_32bit_prop(buffer, offs, "flags", iolink->flags);

	return offs;
}

static const struct sysfs_ops iolink_ops = {
	.show = iolink_show,
};

static struct kobj_type iolink_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &iolink_ops,
};

static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
			char *buffer)
{
	int offs = 0;
	struct kfd_mem_properties *mem;

	buffer[0] = 0;

	mem = container_of(attr, struct kfd_mem_properties, attr);
	if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
		return -EPERM;
	sysfs_show_32bit_prop(buffer, offs, "heap_type", mem->heap_type);
	sysfs_show_64bit_prop(buffer, offs, "size_in_bytes",
			      mem->size_in_bytes);
	sysfs_show_32bit_prop(buffer, offs, "flags", mem->flags);
	sysfs_show_32bit_prop(buffer, offs, "width", mem->width);
	sysfs_show_32bit_prop(buffer, offs, "mem_clk_max",
			      mem->mem_clk_max);

	return offs;
}

static const struct sysfs_ops mem_ops = {
	.show = mem_show,
};

static struct kobj_type mem_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &mem_ops,
};

static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
			      char *buffer)
{
	int offs = 0;
	uint32_t i, j;
	struct kfd_cache_properties *cache;

	buffer[0] = 0;

	cache = container_of(attr, struct kfd_cache_properties, attr);
	if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
		return -EPERM;
	sysfs_show_32bit_prop(buffer, offs, "processor_id_low",
			      cache->processor_id_low);
	sysfs_show_32bit_prop(buffer, offs, "level", cache->cache_level);
	sysfs_show_32bit_prop(buffer, offs, "size", cache->cache_size);
	sysfs_show_32bit_prop(buffer, offs, "cache_line_size",
			      cache->cacheline_size);
	sysfs_show_32bit_prop(buffer, offs, "cache_lines_per_tag",
			      cache->cachelines_per_tag);
	sysfs_show_32bit_prop(buffer, offs, "association", cache->cache_assoc);
	sysfs_show_32bit_prop(buffer, offs, "latency", cache->cache_latency);
	sysfs_show_32bit_prop(buffer, offs, "type", cache->cache_type);
	offs += snprintf(buffer+offs, PAGE_SIZE-offs, "sibling_map ");
	for (i = 0; i < CRAT_SIBLINGMAP_SIZE; i++)
		for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++)
			offs += snprintf(buffer+offs, PAGE_SIZE-offs, "%d,",
					 (cache->sibling_map[i] >> j) & 1);

	buffer[offs-1] = '\n';
	return offs;
}

static const struct sysfs_ops cache_ops = {
	.show = kfd_cache_show,
};

static struct kobj_type cache_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &cache_ops,
};

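/* Performance counter attributes exposed under the node's perf/<block>/
 * sysfs group
 */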
struct kfd_perf_attr {
	struct kobj_attribute attr;
	uint32_t data;
};

static ssize_t perf_show(struct kobject *kobj, struct kobj_attribute *attrs,
			 char *buf)
{
	int offs = 0;
	struct kfd_perf_attr *attr;

	buf[0] = 0;
	attr = container_of(attrs, struct kfd_perf_attr, attr);
	if (!attr->data)
		return 0;
	else
		return sysfs_show_32bit_val(buf, offs, attr->data);
}

#define KFD_PERF_DESC(_name, _data)			\
{							\
	.attr = __ATTR(_name, 0444, perf_show, NULL),	\
	.data = _data,					\
}

static struct kfd_perf_attr perf_attr_iommu[] = {
	KFD_PERF_DESC(max_concurrent, 0),
	KFD_PERF_DESC(num_counters, 0),
	KFD_PERF_DESC(counter_ids, 0),
};

static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
			 char *buffer)
{
	int offs = 0;
	struct kfd_topology_device *dev;
	uint32_t log_max_watch_addr;

	buffer[0] = 0;

	if (strcmp(attr->name, "gpu_id") == 0) {
		dev = container_of(attr, struct kfd_topology_device,
				attr_gpuid);
		if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
			return -EPERM;
		return sysfs_show_32bit_val(buffer, offs, dev->gpu_id);
	}

	if (strcmp(attr->name, "name") == 0) {
		dev = container_of(attr, struct kfd_topology_device,
				attr_name);

		if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
			return -EPERM;
		return sysfs_show_str_val(buffer, offs, dev->node_props.name);
	}

	dev = container_of(attr, struct kfd_topology_device,
			attr_props);
	if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
		return -EPERM;
	sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
			      dev->node_props.cpu_cores_count);
	sysfs_show_32bit_prop(buffer, offs, "simd_count",
			      dev->gpu ? dev->node_props.simd_count : 0);
	sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
			      dev->node_props.mem_banks_count);
	sysfs_show_32bit_prop(buffer, offs, "caches_count",
			      dev->node_props.caches_count);
	sysfs_show_32bit_prop(buffer, offs, "io_links_count",
			      dev->node_props.io_links_count);
	sysfs_show_32bit_prop(buffer, offs, "p2p_links_count",
			      dev->node_props.p2p_links_count);
	sysfs_show_32bit_prop(buffer, offs, "cpu_core_id_base",
			      dev->node_props.cpu_core_id_base);
	sysfs_show_32bit_prop(buffer, offs, "simd_id_base",
			      dev->node_props.simd_id_base);
	sysfs_show_32bit_prop(buffer, offs, "max_waves_per_simd",
			      dev->node_props.max_waves_per_simd);
	sysfs_show_32bit_prop(buffer, offs, "lds_size_in_kb",
			      dev->node_props.lds_size_in_kb);
	sysfs_show_32bit_prop(buffer, offs, "gds_size_in_kb",
			      dev->node_props.gds_size_in_kb);
	sysfs_show_32bit_prop(buffer, offs, "num_gws",
			      dev->node_props.num_gws);
	sysfs_show_32bit_prop(buffer, offs, "wave_front_size",
			      dev->node_props.wave_front_size);
	sysfs_show_32bit_prop(buffer, offs, "array_count",
			      dev->node_props.array_count);
	sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine",
			      dev->node_props.simd_arrays_per_engine);
	sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array",
			      dev->node_props.cu_per_simd_array);
	sysfs_show_32bit_prop(buffer, offs, "simd_per_cu",
			      dev->node_props.simd_per_cu);
	sysfs_show_32bit_prop(buffer, offs, "max_slots_scratch_cu",
			      dev->node_props.max_slots_scratch_cu);
	sysfs_show_32bit_prop(buffer, offs, "gfx_target_version",
			      dev->node_props.gfx_target_version);
	sysfs_show_32bit_prop(buffer, offs, "vendor_id",
			      dev->node_props.vendor_id);
	sysfs_show_32bit_prop(buffer, offs, "device_id",
			      dev->node_props.device_id);
	sysfs_show_32bit_prop(buffer, offs, "location_id",
			      dev->node_props.location_id);
	sysfs_show_32bit_prop(buffer, offs, "domain",
			      dev->node_props.domain);
	sysfs_show_32bit_prop(buffer, offs, "drm_render_minor",
			      dev->node_props.drm_render_minor);
	sysfs_show_64bit_prop(buffer, offs, "hive_id",
			      dev->node_props.hive_id);
	sysfs_show_32bit_prop(buffer, offs, "num_sdma_engines",
			      dev->node_props.num_sdma_engines);
	sysfs_show_32bit_prop(buffer, offs, "num_sdma_xgmi_engines",
			      dev->node_props.num_sdma_xgmi_engines);
	sysfs_show_32bit_prop(buffer, offs, "num_sdma_queues_per_engine",
			      dev->node_props.num_sdma_queues_per_engine);
	sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
			      dev->node_props.num_cp_queues);

	if (dev->gpu) {
		log_max_watch_addr =
			__ilog2_u32(dev->gpu->device_info.num_of_watch_points);

		if (log_max_watch_addr) {
			dev->node_props.capability |=
					HSA_CAP_WATCH_POINTS_SUPPORTED;

			dev->node_props.capability |=
				((log_max_watch_addr <<
					HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
				HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
		}

		if (dev->gpu->adev->asic_type == CHIP_TONGA)
			dev->node_props.capability |=
					HSA_CAP_AQL_QUEUE_DOUBLE_MAP;

		sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
			dev->node_props.max_engine_clk_fcompute);

		sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL);

		sysfs_show_32bit_prop(buffer, offs, "fw_version",
				      dev->gpu->mec_fw_version);
		sysfs_show_32bit_prop(buffer, offs, "capability",
				      dev->node_props.capability);
		sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
				      dev->gpu->sdma_fw_version);
		sysfs_show_64bit_prop(buffer, offs, "unique_id",
				      dev->gpu->adev->unique_id);
	}

	return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
				     cpufreq_quick_get_max(0)/1000);
}

static const struct sysfs_ops node_ops = {
	.show = node_show,
};

static struct kobj_type node_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &node_ops,
};

static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
{
	sysfs_remove_file(kobj, attr);
	kobject_del(kobj);
	kobject_put(kobj);
}

static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
{
	struct kfd_iolink_properties *p2plink;
	struct kfd_iolink_properties *iolink;
	struct kfd_cache_properties *cache;
	struct kfd_mem_properties *mem;
	struct kfd_perf_properties *perf;

	if (dev->kobj_iolink) {
		list_for_each_entry(iolink, &dev->io_link_props, list)
			if (iolink->kobj) {
				kfd_remove_sysfs_file(iolink->kobj,
							&iolink->attr);
				iolink->kobj = NULL;
			}
		kobject_del(dev->kobj_iolink);
		kobject_put(dev->kobj_iolink);
		dev->kobj_iolink = NULL;
	}

	if (dev->kobj_p2plink) {
		list_for_each_entry(p2plink, &dev->p2p_link_props, list)
			if (p2plink->kobj) {
				kfd_remove_sysfs_file(p2plink->kobj,
							&p2plink->attr);
				p2plink->kobj = NULL;
			}
		kobject_del(dev->kobj_p2plink);
		kobject_put(dev->kobj_p2plink);
		dev->kobj_p2plink = NULL;
	}

	if (dev->kobj_cache) {
		list_for_each_entry(cache, &dev->cache_props, list)
			if (cache->kobj) {
				kfd_remove_sysfs_file(cache->kobj,
							&cache->attr);
				cache->kobj = NULL;
			}
		kobject_del(dev->kobj_cache);
		kobject_put(dev->kobj_cache);
		dev->kobj_cache = NULL;
	}

	if (dev->kobj_mem) {
		list_for_each_entry(mem, &dev->mem_props, list)
			if (mem->kobj) {
				kfd_remove_sysfs_file(mem->kobj, &mem->attr);
				mem->kobj = NULL;
			}
		kobject_del(dev->kobj_mem);
		kobject_put(dev->kobj_mem);
		dev->kobj_mem = NULL;
	}

	if (dev->kobj_perf) {
		list_for_each_entry(perf, &dev->perf_props, list) {
			kfree(perf->attr_group);
			perf->attr_group = NULL;
		}
		kobject_del(dev->kobj_perf);
		kobject_put(dev->kobj_perf);
		dev->kobj_perf = NULL;
	}

	if (dev->kobj_node) {
		sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
		sysfs_remove_file(dev->kobj_node, &dev->attr_name);
		sysfs_remove_file(dev->kobj_node, &dev->attr_props);
		kobject_del(dev->kobj_node);
		kobject_put(dev->kobj_node);
		dev->kobj_node = NULL;
	}
}

static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
		uint32_t id)
{
	struct kfd_iolink_properties *p2plink;
	struct kfd_iolink_properties *iolink;
	struct kfd_cache_properties *cache;
	struct kfd_mem_properties *mem;
	struct kfd_perf_properties *perf;
	int ret;
	uint32_t i, num_attrs;
	struct attribute **attrs;

	if (WARN_ON(dev->kobj_node))
		return -EEXIST;

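	/*
	 * Creating the sysfs kobjects
	 */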
	dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
	if (!dev->kobj_node)
		return -ENOMEM;

	ret = kobject_init_and_add(dev->kobj_node, &node_type,
			sys_props.kobj_nodes, "%d", id);
	if (ret < 0) {
		kobject_put(dev->kobj_node);
		return ret;
	}

	dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
	if (!dev->kobj_mem)
		return -ENOMEM;

	dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
	if (!dev->kobj_cache)
		return -ENOMEM;

	dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
	if (!dev->kobj_iolink)
		return -ENOMEM;

	dev->kobj_p2plink = kobject_create_and_add("p2p_links", dev->kobj_node);
	if (!dev->kobj_p2plink)
		return -ENOMEM;

	dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node);
	if (!dev->kobj_perf)
		return -ENOMEM;

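	/*
	 * Creating sysfs files for node properties
	 */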
	dev->attr_gpuid.name = "gpu_id";
	dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&dev->attr_gpuid);
	dev->attr_name.name = "name";
	dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&dev->attr_name);
	dev->attr_props.name = "properties";
	dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&dev->attr_props);
	ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
	if (ret < 0)
		return ret;
	ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
	if (ret < 0)
		return ret;
	ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
	if (ret < 0)
		return ret;

	i = 0;
	list_for_each_entry(mem, &dev->mem_props, list) {
		mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!mem->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(mem->kobj, &mem_type,
				dev->kobj_mem, "%d", i);
		if (ret < 0) {
			kobject_put(mem->kobj);
			return ret;
		}

		mem->attr.name = "properties";
		mem->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&mem->attr);
		ret = sysfs_create_file(mem->kobj, &mem->attr);
		if (ret < 0)
			return ret;
		i++;
	}

	i = 0;
	list_for_each_entry(cache, &dev->cache_props, list) {
		cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!cache->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(cache->kobj, &cache_type,
				dev->kobj_cache, "%d", i);
		if (ret < 0) {
			kobject_put(cache->kobj);
			return ret;
		}

		cache->attr.name = "properties";
		cache->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&cache->attr);
		ret = sysfs_create_file(cache->kobj, &cache->attr);
		if (ret < 0)
			return ret;
		i++;
	}

	i = 0;
	list_for_each_entry(iolink, &dev->io_link_props, list) {
		iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!iolink->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(iolink->kobj, &iolink_type,
				dev->kobj_iolink, "%d", i);
		if (ret < 0) {
			kobject_put(iolink->kobj);
			return ret;
		}

		iolink->attr.name = "properties";
		iolink->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&iolink->attr);
		ret = sysfs_create_file(iolink->kobj, &iolink->attr);
		if (ret < 0)
			return ret;
		i++;
	}

	i = 0;
	list_for_each_entry(p2plink, &dev->p2p_link_props, list) {
		p2plink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!p2plink->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(p2plink->kobj, &iolink_type,
				dev->kobj_p2plink, "%d", i);
		if (ret < 0) {
			kobject_put(p2plink->kobj);
			return ret;
		}

		p2plink->attr.name = "properties";
		p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&p2plink->attr);
		ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
		if (ret < 0)
			return ret;
		i++;
	}

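	/* All hardware blocks have the same number of attributes. */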
	num_attrs = ARRAY_SIZE(perf_attr_iommu);
	list_for_each_entry(perf, &dev->perf_props, list) {
		perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
			* num_attrs + sizeof(struct attribute_group),
			GFP_KERNEL);
		if (!perf->attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(perf->attr_group + 1);
		if (!strcmp(perf->block_name, "iommu")) {
			perf_attr_iommu[0].data = perf->max_concurrent;
			for (i = 0; i < num_attrs; i++)
				attrs[i] = &perf_attr_iommu[i].attr.attr;
		}
		perf->attr_group->name = perf->block_name;
		perf->attr_group->attrs = attrs;
		ret = sysfs_create_group(dev->kobj_perf, perf->attr_group);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int kfd_build_sysfs_node_tree(void)
{
	struct kfd_topology_device *dev;
	int ret;
	uint32_t i = 0;

	list_for_each_entry(dev, &topology_device_list, list) {
		ret = kfd_build_sysfs_node_entry(dev, i);
		if (ret < 0)
			return ret;
		i++;
	}

	return 0;
}

static void kfd_remove_sysfs_node_tree(void)
{
	struct kfd_topology_device *dev;

	list_for_each_entry(dev, &topology_device_list, list)
		kfd_remove_sysfs_node_entry(dev);
}

static int kfd_topology_update_sysfs(void)
{
	int ret;

	if (!sys_props.kobj_topology) {
		sys_props.kobj_topology =
				kfd_alloc_struct(sys_props.kobj_topology);
		if (!sys_props.kobj_topology)
			return -ENOMEM;

		ret = kobject_init_and_add(sys_props.kobj_topology,
				&sysprops_type, &kfd_device->kobj,
				"topology");
		if (ret < 0) {
			kobject_put(sys_props.kobj_topology);
			return ret;
		}

		sys_props.kobj_nodes = kobject_create_and_add("nodes",
				sys_props.kobj_topology);
		if (!sys_props.kobj_nodes)
			return -ENOMEM;

		sys_props.attr_genid.name = "generation_id";
		sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&sys_props.attr_genid);
		ret = sysfs_create_file(sys_props.kobj_topology,
				&sys_props.attr_genid);
		if (ret < 0)
			return ret;

		sys_props.attr_props.name = "system_properties";
		sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&sys_props.attr_props);
		ret = sysfs_create_file(sys_props.kobj_topology,
				&sys_props.attr_props);
		if (ret < 0)
			return ret;
	}

	kfd_remove_sysfs_node_tree();

	return kfd_build_sysfs_node_tree();
}

static void kfd_topology_release_sysfs(void)
{
	kfd_remove_sysfs_node_tree();
	if (sys_props.kobj_topology) {
		sysfs_remove_file(sys_props.kobj_topology,
				&sys_props.attr_genid);
		sysfs_remove_file(sys_props.kobj_topology,
				&sys_props.attr_props);
		if (sys_props.kobj_nodes) {
			kobject_del(sys_props.kobj_nodes);
			kobject_put(sys_props.kobj_nodes);
			sys_props.kobj_nodes = NULL;
		}
		kobject_del(sys_props.kobj_topology);
		kobject_put(sys_props.kobj_topology);
		sys_props.kobj_topology = NULL;
	}
}

static void kfd_topology_update_device_list(struct list_head *temp_list,
					struct list_head *master_list)
{
	while (!list_empty(temp_list)) {
		list_move_tail(temp_list->next, master_list);
		sys_props.num_devices++;
	}
}

static void kfd_debug_print_topology(void)
{
	struct kfd_topology_device *dev;

	down_read(&topology_lock);

	dev = list_last_entry(&topology_device_list,
			struct kfd_topology_device, list);
	if (dev) {
		if (dev->node_props.cpu_cores_count &&
				dev->node_props.simd_count) {
			pr_info("Topology: Add APU node [0x%0x:0x%0x]\n",
				dev->node_props.device_id,
				dev->node_props.vendor_id);
		} else if (dev->node_props.cpu_cores_count)
			pr_info("Topology: Add CPU node\n");
		else if (dev->node_props.simd_count)
			pr_info("Topology: Add dGPU node [0x%0x:0x%0x]\n",
				dev->node_props.device_id,
				dev->node_props.vendor_id);
	}
	up_read(&topology_lock);
}

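/* Helper function for initializing platform_xx members of
 * kfd_system_properties. Uses OEM info from the last CPU/APU node.
 */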
static void kfd_update_system_properties(void)
{
	struct kfd_topology_device *dev;

	down_read(&topology_lock);
	dev = list_last_entry(&topology_device_list,
			struct kfd_topology_device, list);
	if (dev) {
		sys_props.platform_id =
			(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
		sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
		sys_props.platform_rev = dev->oem_revision;
	}
	up_read(&topology_lock);
}

static void find_system_memory(const struct dmi_header *dm,
			       void *private)
{
	struct kfd_mem_properties *mem;
	u16 mem_width, mem_clock;
	struct kfd_topology_device *kdev =
		(struct kfd_topology_device *)private;
	const u8 *dmi_data = (const u8 *)(dm + 1);

	if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
		mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
		mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
		list_for_each_entry(mem, &kdev->mem_props, list) {
			if (mem_width != 0xFFFF && mem_width != 0)
				mem->width = mem_width;
			if (mem_clock != 0)
				mem->mem_clk_max = mem_clock;
		}
	}
}

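/* Add performance counter information to the topology device; currently
 * only IOMMU counters are supported.
 */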
static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
{
	return kfd_iommu_add_perf_counters(kdev);
}

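/* kfd_add_non_crat_information - Add information that is not currently
 * defined in CRAT but is necessary for the KFD topology, e.g. system
 * memory information from DMI for CPU-only nodes.
 * @kdev - topology device to which the additional information is added
 */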
static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
{
	if (!kdev->gpu) {
		dmi_walk(find_system_memory, kdev);
	}
}

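/* kfd_is_acpi_crat_invalid - CRAT from ACPI is valid only for AMD APU
 * devices. An AMD APU is identified if both CPU and GPU cores are present.
 * @device_list - topology device list created by parsing the ACPI CRAT
 * Return - true if invalid, false if valid
 */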
static bool kfd_is_acpi_crat_invalid(struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	list_for_each_entry(dev, device_list, list) {
		if (dev->node_props.cpu_cores_count &&
				dev->node_props.simd_count)
			return false;
	}
	pr_info("Ignoring ACPI CRAT on non-APU system\n");
	return true;
}

int kfd_topology_init(void)
{
	void *crat_image = NULL;
	size_t image_size = 0;
	int ret;
	struct list_head temp_topology_device_list;
	int cpu_only_node = 0;
	struct kfd_topology_device *kdev;
	int proximity_domain;

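	/* topology_device_list - Master list of all topology devices
	 * temp_topology_device_list - temporary list created while parsing
	 * CRAT or VCRAT. Once parsing is complete the contents of this list
	 * are moved to topology_device_list.
	 */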
	INIT_LIST_HEAD(&topology_device_list);
	INIT_LIST_HEAD(&temp_topology_device_list);
	init_rwsem(&topology_lock);

	memset(&sys_props, 0, sizeof(sys_props));

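	/* Proximity domains in ACPI CRAT tables start counting at 0. The
	 * same should be true for virtual CRAT tables created by this
	 * driver.
	 */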
	proximity_domain = 0;

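	/* Get the CRAT image from ACPI. If ACPI doesn't have one, or if it
	 * is invalid, fall back to a virtual CRAT describing a CPU-only
	 * node.
	 */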
	ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
	if (!ret) {
		ret = kfd_parse_crat_table(crat_image,
					   &temp_topology_device_list,
					   proximity_domain);
		if (ret ||
		    kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
			kfd_release_topology_device_list(
				&temp_topology_device_list);
			kfd_destroy_crat_image(crat_image);
			crat_image = NULL;
		}
	}

	if (!crat_image) {
		ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
						    COMPUTE_UNIT_CPU, NULL,
						    proximity_domain);
		cpu_only_node = 1;
		if (ret) {
			pr_err("Error creating VCRAT table for CPU\n");
			return ret;
		}

		ret = kfd_parse_crat_table(crat_image,
					   &temp_topology_device_list,
					   proximity_domain);
		if (ret) {
			pr_err("Error parsing VCRAT table for CPU\n");
			goto err;
		}
	}

	kdev = list_first_entry(&temp_topology_device_list,
				struct kfd_topology_device, list);
	kfd_add_perf_to_topology(kdev);

	down_write(&topology_lock);
	kfd_topology_update_device_list(&temp_topology_device_list,
					&topology_device_list);
	topology_crat_proximity_domain = sys_props.num_devices-1;
	ret = kfd_topology_update_sysfs();
	up_write(&topology_lock);

	if (!ret) {
		sys_props.generation_count++;
		kfd_update_system_properties();
		kfd_debug_print_topology();
	} else
		pr_err("Failed to update topology in sysfs ret=%d\n", ret);

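	/* For nodes with GPU, this information gets added
	 * when GPU is detected (kfd_topology_add_device).
	 */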
	if (cpu_only_node) {
		down_write(&topology_lock);
		kdev = list_first_entry(&topology_device_list,
				struct kfd_topology_device, list);
		up_write(&topology_lock);
		kfd_add_non_crat_information(kdev);
	}

err:
	kfd_destroy_crat_image(crat_image);
	return ret;
}

void kfd_topology_shutdown(void)
{
	down_write(&topology_lock);
	kfd_topology_release_sysfs();
	kfd_release_live_view();
	up_write(&topology_lock);
}

static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
{
	uint32_t hashout;
	uint32_t buf[7];
	uint64_t local_mem_size;
	int i;

	if (!gpu)
		return 0;

	local_mem_size = gpu->local_mem_info.local_mem_size_private +
			gpu->local_mem_info.local_mem_size_public;

	buf[0] = gpu->pdev->devfn;
	buf[1] = gpu->pdev->subsystem_vendor |
		(gpu->pdev->subsystem_device << 16);
	buf[2] = pci_domain_nr(gpu->pdev->bus);
	buf[3] = gpu->pdev->device;
	buf[4] = gpu->pdev->bus->number;
	buf[5] = lower_32_bits(local_mem_size);
	buf[6] = upper_32_bits(local_mem_size);

	for (i = 0, hashout = 0; i < 7; i++)
		hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);

	return hashout;
}

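/* kfd_assign_gpu - Attach @gpu to the topology device that represents it.
 * If no matching topology device is found, return NULL: a new topology
 * device has to be created for this GPU.
 */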
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
	struct kfd_topology_device *dev;
	struct kfd_topology_device *out_dev = NULL;
	struct kfd_mem_properties *mem;
	struct kfd_cache_properties *cache;
	struct kfd_iolink_properties *iolink;
	struct kfd_iolink_properties *p2plink;

	down_write(&topology_lock);
	list_for_each_entry(dev, &topology_device_list, list) {
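		/* Discrete GPUs need their own topology device list
		 * entries. Don't assign them to CPU/APU nodes.
		 */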
		if (!gpu->use_iommu_v2 &&
		    dev->node_props.cpu_cores_count)
			continue;

		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
			dev->gpu = gpu;
			out_dev = dev;

			list_for_each_entry(mem, &dev->mem_props, list)
				mem->gpu = dev->gpu;
			list_for_each_entry(cache, &dev->cache_props, list)
				cache->gpu = dev->gpu;
			list_for_each_entry(iolink, &dev->io_link_props, list)
				iolink->gpu = dev->gpu;
			list_for_each_entry(p2plink, &dev->p2p_link_props, list)
				p2plink->gpu = dev->gpu;
			break;
		}
	}
	up_write(&topology_lock);
	return out_dev;
}

static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
{
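	/* Placeholder: notifying user space about GPU arrival/removal is
	 * not implemented yet.
	 */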
}

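/* Propagate the GPU's maximum memory clock to all memory banks of the
 * topology device.
 */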
static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *mem;
	struct kfd_local_mem_info local_mem_info;

	if (!dev)
		return;

	amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info);

	list_for_each_entry(mem, &dev->mem_props, list)
		mem->mem_clk_max = local_mem_info.mem_clk_max;
}

static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
		struct kfd_topology_device *target_gpu_dev,
		struct kfd_iolink_properties *link)
{
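	/* XGMI always supports atomics between links. */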
	if (link->iolink_type == CRAT_IOLINK_TYPE_XGMI)
		return;

	if (target_gpu_dev) {
		uint32_t cap;

		pcie_capability_read_dword(target_gpu_dev->gpu->pdev,
				PCI_EXP_DEVCAP2, &cap);

		if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			     PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
			link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
				CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
	} else {
		if (!dev->gpu->pci_atomic_requested ||
		    dev->gpu->adev->asic_type == CHIP_HAWAII)
			link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
				CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
	}
}

static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
		struct kfd_iolink_properties *outbound_link,
		struct kfd_iolink_properties *inbound_link)
{
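	/* PCIe links into a CPU node are non-coherent */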
	if (!to_dev->gpu &&
	    inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
		inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;

	if (to_dev->gpu) {
		if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
		    (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
		     KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) {
			outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
			inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
		}
	}
}

static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
	struct kfd_iolink_properties *link, *inbound_link;
	struct kfd_topology_device *peer_dev;

	if (!dev || !dev->gpu)
		return;

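	/* GPUs only create direct links, so apply the flag settings to all
	 * of their io links
	 */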
	list_for_each_entry(link, &dev->io_link_props, list) {
		link->flags = CRAT_IOLINK_FLAGS_ENABLED;
		kfd_set_iolink_no_atomics(dev, NULL, link);
		peer_dev = kfd_topology_device_by_proximity_domain(
				link->node_to);

		if (!peer_dev)
			continue;

		if (!peer_dev->gpu && !peer_dev->node_props.hive_id &&
		    dev->node_props.hive_id &&
		    dev->gpu->adev->gmc.xgmi.connected_to_cpu)
			peer_dev->node_props.hive_id = dev->node_props.hive_id;

		list_for_each_entry(inbound_link, &peer_dev->io_link_props,
				    list) {
			if (inbound_link->node_to != link->node_from)
				continue;

			inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
			kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
			kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
		}
	}

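	/* Apply the same flag settings to the p2p links */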
	list_for_each_entry(link, &dev->p2p_link_props, list) {
		link->flags = CRAT_IOLINK_FLAGS_ENABLED;
		kfd_set_iolink_no_atomics(dev, NULL, link);
		peer_dev = kfd_topology_device_by_proximity_domain(
				link->node_to);

		if (!peer_dev)
			continue;

		list_for_each_entry(inbound_link, &peer_dev->p2p_link_props,
				    list) {
			if (inbound_link->node_to != link->node_from)
				continue;

			inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
			kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
			kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
		}
	}
}

static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
				struct kfd_iolink_properties *p2plink)
{
	int ret;

	p2plink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (!p2plink->kobj)
		return -ENOMEM;

	ret = kobject_init_and_add(p2plink->kobj, &iolink_type,
			dev->kobj_p2plink, "%d", dev->node_props.p2p_links_count - 1);
	if (ret < 0) {
		kobject_put(p2plink->kobj);
		return ret;
	}

	p2plink->attr.name = "properties";
	p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&p2plink->attr);
	ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
	if (ret < 0)
		return ret;

	return 0;
}

static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
	struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
	struct kfd_iolink_properties *props = NULL, *props2 = NULL;
	struct kfd_topology_device *cpu_dev;
	int ret = 0;
	int i, num_cpu;

	num_cpu = 0;
	list_for_each_entry(cpu_dev, &topology_device_list, list) {
		if (cpu_dev->gpu)
			break;
		num_cpu++;
	}

	gpu_link = list_first_entry(&kdev->io_link_props,
				    struct kfd_iolink_properties, list);
	if (!gpu_link)
		return -ENOMEM;

	for (i = 0; i < num_cpu; i++) {
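		/* CPU <--> GPU: a direct link already exists */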
		if (gpu_link->node_to == i)
			continue;

		cpu_link = NULL;
		cpu_dev = kfd_topology_device_by_proximity_domain(i);
		if (cpu_dev) {
			list_for_each_entry(tmp_link,
					    &cpu_dev->io_link_props, list) {
				if (tmp_link->node_to == gpu_link->node_to) {
					cpu_link = tmp_link;
					break;
				}
			}
		}

		if (!cpu_link)
			return -ENOMEM;

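		/* CPU <--> CPU <--> GPU: add the combined link on the GPU
		 * node
		 */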
		props = kfd_alloc_struct(props);
		if (!props)
			return -ENOMEM;

		memcpy(props, gpu_link, sizeof(struct kfd_iolink_properties));
		props->weight = gpu_link->weight + cpu_link->weight;
		props->min_latency = gpu_link->min_latency + cpu_link->min_latency;
		props->max_latency = gpu_link->max_latency + cpu_link->max_latency;
		props->min_bandwidth = min(gpu_link->min_bandwidth, cpu_link->min_bandwidth);
		props->max_bandwidth = min(gpu_link->max_bandwidth, cpu_link->max_bandwidth);

		props->node_from = gpu_node;
		props->node_to = i;
		kdev->node_props.p2p_links_count++;
		list_add_tail(&props->list, &kdev->p2p_link_props);
		ret = kfd_build_p2p_node_entry(kdev, props);
		if (ret < 0)
			return ret;

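		/* For small BAR, no CPU --> GPU link is created; only
		 * large-BAR GPUs get the reverse link on the CPU node.
		 */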
		if (kfd_dev_is_large_bar(kdev->gpu)) {
			props2 = kfd_alloc_struct(props2);
			if (!props2)
				return -ENOMEM;

			memcpy(props2, props, sizeof(struct kfd_iolink_properties));
			props2->node_from = i;
			props2->node_to = gpu_node;
			props2->kobj = NULL;
			cpu_dev->node_props.p2p_links_count++;
			list_add_tail(&props2->list, &cpu_dev->p2p_link_props);
			ret = kfd_build_p2p_node_entry(cpu_dev, props2);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

#if defined(CONFIG_HSA_AMD_P2P)
static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
		struct kfd_topology_device *peer, int from, int to)
{
	struct kfd_iolink_properties *props = NULL;
	struct kfd_iolink_properties *iolink1, *iolink2, *iolink3;
	struct kfd_topology_device *cpu_dev;
	int ret = 0;

	if (!amdgpu_device_is_peer_accessible(
				kdev->gpu->adev,
				peer->gpu->adev))
		return ret;

	iolink1 = list_first_entry(&kdev->io_link_props,
				   struct kfd_iolink_properties, list);
	if (!iolink1)
		return -ENOMEM;

	iolink2 = list_first_entry(&peer->io_link_props,
				   struct kfd_iolink_properties, list);
	if (!iolink2)
		return -ENOMEM;

	props = kfd_alloc_struct(props);
	if (!props)
		return -ENOMEM;

	memcpy(props, iolink1, sizeof(struct kfd_iolink_properties));

	props->weight = iolink1->weight + iolink2->weight;
	props->min_latency = iolink1->min_latency + iolink2->min_latency;
	props->max_latency = iolink1->max_latency + iolink2->max_latency;
	props->min_bandwidth = min(iolink1->min_bandwidth, iolink2->min_bandwidth);
	props->max_bandwidth = min(iolink1->max_bandwidth, iolink2->max_bandwidth);

	if (iolink1->node_to != iolink2->node_to) {
		cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
		if (cpu_dev) {
			list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
				if (iolink3->node_to == iolink2->node_to) {
					props->weight += iolink3->weight;
					props->min_latency += iolink3->min_latency;
					props->max_latency += iolink3->max_latency;
					props->min_bandwidth = min(props->min_bandwidth,
							iolink3->min_bandwidth);
					props->max_bandwidth = min(props->max_bandwidth,
							iolink3->max_bandwidth);
					break;
				}
		} else {
			WARN(1, "CPU node not found");
		}
	}

	props->node_from = from;
	props->node_to = to;
	peer->node_props.p2p_links_count++;
	list_add_tail(&props->list, &peer->p2p_link_props);
	ret = kfd_build_p2p_node_entry(peer, props);

	return ret;
}
#endif

static int kfd_dev_create_p2p_links(void)
{
	struct kfd_topology_device *dev;
	struct kfd_topology_device *new_dev;
#if defined(CONFIG_HSA_AMD_P2P)
	uint32_t i;
#endif
	uint32_t k;
	int ret = 0;

	k = 0;
	list_for_each_entry(dev, &topology_device_list, list)
		k++;
	if (k < 2)
		return 0;

	new_dev = list_last_entry(&topology_device_list, struct kfd_topology_device, list);
	if (WARN_ON(!new_dev->gpu))
		return 0;

	k--;

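	/* create in-direct links between the new GPU and CPU nodes */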
	ret = kfd_create_indirect_link_prop(new_dev, k);
	if (ret < 0)
		goto out;

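	/* create bi-directional p2p links between the new GPU and every
	 * existing GPU that is not in the same xGMI hive
	 */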
#if defined(CONFIG_HSA_AMD_P2P)
	i = 0;
	list_for_each_entry(dev, &topology_device_list, list) {
		if (dev == new_dev)
			break;
		if (!dev->gpu || !dev->gpu->adev ||
		    (dev->gpu->hive_id &&
		     dev->gpu->hive_id == new_dev->gpu->hive_id))
			goto next;

		ret = kfd_add_peer_prop(new_dev, dev, i, k);
		if (ret < 0)
			goto out;

		ret = kfd_add_peer_prop(dev, new_dev, k, i);
		if (ret < 0)
			goto out;
next:
		i++;
	}
#endif

out:
	return ret;
}

int kfd_topology_add_device(struct kfd_dev *gpu)
{
	uint32_t gpu_id;
	struct kfd_topology_device *dev;
	struct kfd_cu_info cu_info;
	int res = 0;
	struct list_head temp_topology_device_list;
	void *crat_image = NULL;
	size_t image_size = 0;
	int proximity_domain;
	int i;
	const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];

	INIT_LIST_HEAD(&temp_topology_device_list);

	gpu_id = kfd_generate_gpu_id(gpu);
	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);

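	/* Check to see if this gpu device exists in the topology_device_list.
	 * If so, assign the gpu to that device,
	 * else create a Virtual CRAT for this gpu device and then parse that
	 * CRAT to create a new topology device. Once created assign the gpu to
	 * that topology device
	 */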
	dev = kfd_assign_gpu(gpu);
	if (!dev) {
		down_write(&topology_lock);
		proximity_domain = ++topology_crat_proximity_domain;

		res = kfd_create_crat_image_virtual(&crat_image, &image_size,
						    COMPUTE_UNIT_GPU, gpu,
						    proximity_domain);
		if (res) {
			pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
			       gpu_id);
			topology_crat_proximity_domain--;
			up_write(&topology_lock);
			return res;
		}
		res = kfd_parse_crat_table(crat_image,
					   &temp_topology_device_list,
					   proximity_domain);
		if (res) {
			pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
			       gpu_id);
			topology_crat_proximity_domain--;
			up_write(&topology_lock);
			goto err;
		}

		kfd_topology_update_device_list(&temp_topology_device_list,
						&topology_device_list);

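		/* Update the SYSFS tree, since we added another topology
		 * device
		 */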
		res = kfd_topology_update_sysfs();
		up_write(&topology_lock);

		if (!res)
			sys_props.generation_count++;
		else
			pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
			       gpu_id, res);
		dev = kfd_assign_gpu(gpu);
		if (WARN_ON(!dev)) {
			res = -ENODEV;
			goto err;
		}
	}

	dev->gpu_id = gpu_id;
	gpu->id = gpu_id;

	kfd_dev_create_p2p_links();

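	/* Fill-in additional information that is not available in CRAT but
	 * needed for the topology
	 */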
	amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info);

	for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) {
		dev->node_props.name[i] = __tolower(asic_name[i]);
		if (asic_name[i] == '\0')
			break;
	}
	dev->node_props.name[i] = '\0';

	dev->node_props.simd_arrays_per_engine =
		cu_info.num_shader_arrays_per_engine;

	dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version;
	dev->node_props.vendor_id = gpu->pdev->vendor;
	dev->node_props.device_id = gpu->pdev->device;
	dev->node_props.capability |=
		((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
			HSA_CAP_ASIC_REVISION_MASK);
	dev->node_props.location_id = pci_dev_id(gpu->pdev);
	dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
	dev->node_props.max_engine_clk_fcompute =
		amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
	dev->node_props.max_engine_clk_ccompute =
		cpufreq_quick_get_max(0) / 1000;
	dev->node_props.drm_render_minor =
		gpu->shared_resources.drm_render_minor;

	dev->node_props.hive_id = gpu->hive_id;
	dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
	dev->node_props.num_sdma_xgmi_engines =
		kfd_get_num_xgmi_sdma_engines(gpu);
	dev->node_props.num_sdma_queues_per_engine =
		gpu->device_info.num_sdma_queues_per_engine -
		gpu->device_info.num_reserved_sdma_queues_per_engine;
	dev->node_props.num_gws = (dev->gpu->gws &&
		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
		dev->gpu->adev->gds.gws_size : 0;
	dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);

	kfd_fill_mem_clk_max_info(dev);
	kfd_fill_iolink_non_crat_info(dev);

	switch (dev->gpu->adev->asic_type) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_TONGA:
		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 <<
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
		break;
	case CHIP_CARRIZO:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pr_debug("Adding doorbell packet type capability\n");
		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
		break;
	default:
		if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1))
			dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
				HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
				HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
		else
			WARN(1, "Unexpected ASIC family %u",
			     dev->gpu->adev->asic_type);
	}

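	/* Overwrite ATS capability according to needs_iommu_device to fix
	 * potential missing corresponding bit in CRAT of BIOS.
	 */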
	if (dev->gpu->use_iommu_v2)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
	else
		dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;

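	/* Fix errors in CZ CRAT.
	 * simd_count: Carrizo CRAT reports wrong simd_count, probably
	 *		because it doesn't consider masked out CUs
	 * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
	 */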
	if (dev->gpu->adev->asic_type == CHIP_CARRIZO) {
		dev->node_props.simd_count =
			cu_info.simd_per_cu * cu_info.cu_active_number;
		dev->node_props.max_waves_per_simd = 10;
	}

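	/* kfd only concerns sram ecc on GFX and HBM ecc on UMC */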
	dev->node_props.capability |=
		((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
		HSA_CAP_SRAM_EDCSUPPORTED : 0;
	dev->node_props.capability |=
		((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
		HSA_CAP_MEM_EDCSUPPORTED : 0;

	if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1))
		dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
			HSA_CAP_RASEVENTNOTIFY : 0;

	if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
		dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;

	kfd_debug_print_topology();

	if (!res)
		kfd_notify_gpu_change(gpu_id, 1);
err:
	kfd_destroy_crat_image(crat_image);
	return res;
}

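/**
 * kfd_topology_update_io_links() - Update IO links after device removal.
 * @proximity_domain: Proximity domain value of the dev being removed.
 *
 * The topology list is arranged in increasing order of proximity domain.
 *
 * Two things need to be done when a device is removed:
 * 1. All the IO links to this device need to be removed.
 * 2. All nodes after the current node need to decrement their
 *    proximity domain value by 1.
 */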
static void kfd_topology_update_io_links(int proximity_domain)
{
	struct kfd_topology_device *dev;
	struct kfd_iolink_properties *iolink, *p2plink, *tmp;

	list_for_each_entry(dev, &topology_device_list, list) {
		if (dev->proximity_domain > proximity_domain)
			dev->proximity_domain--;

		list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) {
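			/* If there is an io link to the dev being removed
			 * then remove that link; otherwise renumber the
			 * "node_from"/"node_to" fields that point past it.
			 */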
			if (iolink->node_to == proximity_domain) {
				list_del(&iolink->list);
				dev->node_props.io_links_count--;
			} else {
				if (iolink->node_from > proximity_domain)
					iolink->node_from--;
				if (iolink->node_to > proximity_domain)
					iolink->node_to--;
			}
		}

		list_for_each_entry_safe(p2plink, tmp, &dev->p2p_link_props, list) {
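			/* Same for the p2p links */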
			if (p2plink->node_to == proximity_domain) {
				list_del(&p2plink->list);
				dev->node_props.p2p_links_count--;
			} else {
				if (p2plink->node_from > proximity_domain)
					p2plink->node_from--;
				if (p2plink->node_to > proximity_domain)
					p2plink->node_to--;
			}
		}
	}
}

int kfd_topology_remove_device(struct kfd_dev *gpu)
{
	struct kfd_topology_device *dev, *tmp;
	uint32_t gpu_id;
	int res = -ENODEV;
	int i = 0;

	down_write(&topology_lock);

	list_for_each_entry_safe(dev, tmp, &topology_device_list, list) {
		if (dev->gpu == gpu) {
			gpu_id = dev->gpu_id;
			kfd_remove_sysfs_node_entry(dev);
			kfd_release_topology_device(dev);
			sys_props.num_devices--;
			kfd_topology_update_io_links(i);
			topology_crat_proximity_domain = sys_props.num_devices-1;
			sys_props.generation_count++;
			res = 0;
			if (kfd_topology_update_sysfs() < 0)
				kfd_topology_release_sysfs();
			break;
		}
		i++;
	}

	up_write(&topology_lock);

	if (!res)
		kfd_notify_gpu_change(gpu_id, 0);

	return res;
}

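/* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
 *	topology. If a GPU device is found at @idx, a valid kfd_dev pointer
 *	is returned through @kdev.
 * Return -	0: On success (@kdev will be NULL for non GPU nodes)
 *		-1: If end of list
 */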
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
{
	struct kfd_topology_device *top_dev;
	uint8_t device_idx = 0;

	*kdev = NULL;
	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list) {
		if (device_idx == idx) {
			*kdev = top_dev->gpu;
			up_read(&topology_lock);
			return 0;
		}

		device_idx++;
	}

	up_read(&topology_lock);

	return -1;
}

static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
	int first_cpu_of_numa_node;

	if (!cpumask || cpumask == cpu_none_mask)
		return -1;
	first_cpu_of_numa_node = cpumask_first(cpumask);
	if (first_cpu_of_numa_node >= nr_cpu_ids)
		return -1;
#ifdef CONFIG_X86_64
	return cpu_data(first_cpu_of_numa_node).apicid;
#else
	return first_cpu_of_numa_node;
#endif
}

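/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical
 *	processor of the given NUMA node, -1 on failure
 */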
int kfd_numa_node_to_apic_id(int numa_node_id)
{
	if (numa_node_id == -1) {
		pr_warn("Invalid NUMA Node. Use online CPU mask\n");
		return kfd_cpumask_to_apic_id(cpu_online_mask);
	}
	return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
}

void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
{
	struct kfd_topology_device *dev;

	gpu->use_iommu_v2 = false;

	if (!gpu->device_info.needs_iommu_device)
		return;

	down_read(&topology_lock);

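	/* Only use IOMMUv2 if there is an APU topology node with no GPU
	 * assigned yet. This GPU will be assigned to it.
	 */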
	list_for_each_entry(dev, &topology_device_list, list)
		if (dev->node_props.cpu_cores_count &&
		    dev->node_props.simd_count &&
		    !dev->gpu)
			gpu->use_iommu_v2 = true;

	up_read(&topology_lock);
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
{
	struct kfd_topology_device *dev;
	unsigned int i = 0;
	int r = 0;

	down_read(&topology_lock);

	list_for_each_entry(dev, &topology_device_list, list) {
		if (!dev->gpu) {
			i++;
			continue;
		}

		seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
		r = dqm_debugfs_hqds(m, dev->gpu->dqm);
		if (r)
			break;
	}

	up_read(&topology_lock);

	return r;
}

int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
{
	struct kfd_topology_device *dev;
	unsigned int i = 0;
	int r = 0;

	down_read(&topology_lock);

	list_for_each_entry(dev, &topology_device_list, list) {
		if (!dev->gpu) {
			i++;
			continue;
		}

		seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
		r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);
		if (r)
			break;
	}

	up_read(&topology_lock);

	return r;
}

#endif