0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/kernel.h>
0011 #include <linux/platform_device.h>
0012 #include <linux/of.h>
0013 #include <linux/of_address.h>
0014 #include <linux/of_platform.h>
0015 #include <linux/crash_dump.h>
0016 #include <linux/debugfs.h>
0017 #include <asm/opal.h>
0018 #include <asm/io.h>
0019 #include <asm/imc-pmu.h>
0020 #include <asm/cputhreads.h>
0021
/* Parent dentry of the "imc" debugfs directory; created once for the first
 * nest PMU (see opal_imc_counters_probe) and shared by all exported files. */
static struct dentry *imc_debugfs_parent;
0023
0024
0025 static int imc_mem_get(void *data, u64 *val)
0026 {
0027 *val = cpu_to_be64(*(u64 *)data);
0028 return 0;
0029 }
0030
0031 static int imc_mem_set(void *data, u64 val)
0032 {
0033 *(u64 *)data = cpu_to_be64(val);
0034 return 0;
0035 }
0036 DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");
0037
0038 static void imc_debugfs_create_x64(const char *name, umode_t mode,
0039 struct dentry *parent, u64 *value)
0040 {
0041 debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
0042 }
0043
0044
0045
0046
0047
0048
0049
0050
0051 static void export_imc_mode_and_cmd(struct device_node *node,
0052 struct imc_pmu *pmu_ptr)
0053 {
0054 static u64 loc, *imc_mode_addr, *imc_cmd_addr;
0055 char mode[16], cmd[16];
0056 u32 cb_offset;
0057 struct imc_mem_info *ptr = pmu_ptr->mem_info;
0058
0059 imc_debugfs_parent = debugfs_create_dir("imc", arch_debugfs_dir);
0060
0061 if (of_property_read_u32(node, "cb_offset", &cb_offset))
0062 cb_offset = IMC_CNTL_BLK_OFFSET;
0063
0064 while (ptr->vbase != NULL) {
0065 loc = (u64)(ptr->vbase) + cb_offset;
0066 imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
0067 sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
0068 imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
0069 imc_mode_addr);
0070
0071 imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
0072 sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
0073 imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
0074 imc_cmd_addr);
0075 ptr++;
0076 }
0077 }
0078
0079
0080
0081
0082
0083 static int imc_get_mem_addr_nest(struct device_node *node,
0084 struct imc_pmu *pmu_ptr,
0085 u32 offset)
0086 {
0087 int nr_chips = 0, i;
0088 u64 *base_addr_arr, baddr;
0089 u32 *chipid_arr;
0090
0091 nr_chips = of_property_count_u32_elems(node, "chip-id");
0092 if (nr_chips <= 0)
0093 return -ENODEV;
0094
0095 base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL);
0096 if (!base_addr_arr)
0097 return -ENOMEM;
0098
0099 chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL);
0100 if (!chipid_arr) {
0101 kfree(base_addr_arr);
0102 return -ENOMEM;
0103 }
0104
0105 if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
0106 goto error;
0107
0108 if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
0109 nr_chips))
0110 goto error;
0111
0112 pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
0113 GFP_KERNEL);
0114 if (!pmu_ptr->mem_info)
0115 goto error;
0116
0117 for (i = 0; i < nr_chips; i++) {
0118 pmu_ptr->mem_info[i].id = chipid_arr[i];
0119 baddr = base_addr_arr[i] + offset;
0120 pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
0121 }
0122
0123 pmu_ptr->imc_counter_mmaped = true;
0124 kfree(base_addr_arr);
0125 kfree(chipid_arr);
0126 return 0;
0127
0128 error:
0129 kfree(base_addr_arr);
0130 kfree(chipid_arr);
0131 return -1;
0132 }
0133
0134
0135
0136
0137
0138
0139 static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
0140 {
0141 int ret = 0;
0142 struct imc_pmu *pmu_ptr;
0143 u32 offset;
0144
0145
0146 if (domain < 0)
0147 return NULL;
0148
0149
0150 pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
0151 if (!pmu_ptr)
0152 return NULL;
0153
0154
0155 pmu_ptr->domain = domain;
0156
0157 ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
0158 if (ret)
0159 goto free_pmu;
0160
0161 if (!of_property_read_u32(parent, "offset", &offset)) {
0162 if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
0163 goto free_pmu;
0164 }
0165
0166
0167 ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
0168 if (ret) {
0169 pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
0170 kfree(pmu_ptr->pmu.name);
0171 if (pmu_ptr->domain == IMC_DOMAIN_NEST)
0172 kfree(pmu_ptr->mem_info);
0173 kfree(pmu_ptr);
0174 return NULL;
0175 }
0176
0177 return pmu_ptr;
0178
0179 free_pmu:
0180 kfree(pmu_ptr);
0181 return NULL;
0182 }
0183
0184 static void disable_nest_pmu_counters(void)
0185 {
0186 int nid, cpu;
0187 const struct cpumask *l_cpumask;
0188
0189 cpus_read_lock();
0190 for_each_node_with_cpus(nid) {
0191 l_cpumask = cpumask_of_node(nid);
0192 cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
0193 if (cpu >= nr_cpu_ids)
0194 continue;
0195 opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
0196 get_hard_smp_processor_id(cpu));
0197 }
0198 cpus_read_unlock();
0199 }
0200
0201 static void disable_core_pmu_counters(void)
0202 {
0203 int cpu, rc;
0204
0205 cpus_read_lock();
0206
0207 for_each_online_cpu(cpu) {
0208 if (cpu_first_thread_sibling(cpu) != cpu)
0209 continue;
0210 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
0211 get_hard_smp_processor_id(cpu));
0212 if (rc)
0213 pr_err("%s: Failed to stop Core (cpu = %d)\n",
0214 __func__, cpu);
0215 }
0216 cpus_read_unlock();
0217 }
0218
0219 int get_max_nest_dev(void)
0220 {
0221 struct device_node *node;
0222 u32 pmu_units = 0, type;
0223
0224 for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
0225 if (of_property_read_u32(node, "type", &type))
0226 continue;
0227
0228 if (type == IMC_TYPE_CHIP)
0229 pmu_units++;
0230 }
0231
0232 return pmu_units;
0233 }
0234
0235 static int opal_imc_counters_probe(struct platform_device *pdev)
0236 {
0237 struct device_node *imc_dev = pdev->dev.of_node;
0238 struct imc_pmu *pmu;
0239 int pmu_count = 0, domain;
0240 bool core_imc_reg = false, thread_imc_reg = false;
0241 u32 type;
0242
0243
0244
0245
0246
0247 if (is_kdump_kernel()) {
0248 disable_nest_pmu_counters();
0249 disable_core_pmu_counters();
0250 return -ENODEV;
0251 }
0252
0253 for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
0254 pmu = NULL;
0255 if (of_property_read_u32(imc_dev, "type", &type)) {
0256 pr_warn("IMC Device without type property\n");
0257 continue;
0258 }
0259
0260 switch (type) {
0261 case IMC_TYPE_CHIP:
0262 domain = IMC_DOMAIN_NEST;
0263 break;
0264 case IMC_TYPE_CORE:
0265 domain =IMC_DOMAIN_CORE;
0266 break;
0267 case IMC_TYPE_THREAD:
0268 domain = IMC_DOMAIN_THREAD;
0269 break;
0270 case IMC_TYPE_TRACE:
0271 domain = IMC_DOMAIN_TRACE;
0272 break;
0273 default:
0274 pr_warn("IMC Unknown Device type \n");
0275 domain = -1;
0276 break;
0277 }
0278
0279 pmu = imc_pmu_create(imc_dev, pmu_count, domain);
0280 if (pmu != NULL) {
0281 if (domain == IMC_DOMAIN_NEST) {
0282 if (!imc_debugfs_parent)
0283 export_imc_mode_and_cmd(imc_dev, pmu);
0284 pmu_count++;
0285 }
0286 if (domain == IMC_DOMAIN_CORE)
0287 core_imc_reg = true;
0288 if (domain == IMC_DOMAIN_THREAD)
0289 thread_imc_reg = true;
0290 }
0291 }
0292
0293
0294 if (!core_imc_reg && thread_imc_reg)
0295 unregister_thread_imc();
0296
0297 return 0;
0298 }
0299
static void opal_imc_counters_shutdown(struct platform_device *pdev)
{
	/*
	 * .shutdown hook: quiesce the nest and core IMC engines so no
	 * counting is in flight when the system reboots or kexecs into
	 * another kernel.
	 */
	disable_nest_pmu_counters();
	disable_core_pmu_counters();
}
0310
/* Device-tree match table: binds to the top-level IMC counters node. */
static const struct of_device_id opal_imc_match[] = {
	{ .compatible = IMC_DTB_COMPAT },
	{},
};

static struct platform_driver opal_imc_driver = {
	.driver = {
		.name = "opal-imc-counters",
		.of_match_table = opal_imc_match,
	},
	.probe = opal_imc_counters_probe,
	.shutdown = opal_imc_counters_shutdown,
};

/* Built-in only — no module unload path, hence no .remove callback. */
builtin_platform_driver(opal_imc_driver);