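/*
 * arch_topology - arch-specific CPU topology, CPU capacity and frequency
 * invariance handling shared by DT/ACPI based architectures (arm64, RISC-V).
 */
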
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
static DEFINE_PER_CPU(u32, freq_factor) = 1;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

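	/*
	 * Task scheduler behaviour depends on frequency invariance support.
	 * Only flip the cached status and rebuild the sched domains when the
	 * system-wide invariance status has actually changed to @status.
	 */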
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

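	/*
	 * Take a snapshot of the invariance status before the first counters
	 * are registered, so update_scale_freq_invariant() only rebuilds the
	 * sched domains when the status really changes.
	 */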
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use arch-provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

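	/*
	 * Make sure all concurrent users of the old sft_data pointers have
	 * finished before callers are allowed to free the backing data.
	 */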
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

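	/*
	 * If scale-invariance counters drive the frequency scale for these
	 * CPUs, don't overwrite it with cpufreq-provided values; the scale
	 * factor is updated from topology_scale_freq_tick() instead.
	 */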
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

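/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. A
 * @capped_freq at or above the CPUs' maximum frequency (e.g. a boost
 * frequency) results in a thermal pressure of zero.
 */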
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = per_cpu(freq_factor, cpu);

	/* Convert to the MHz scale used by freq_factor */
	capped_freq /= 1000;

	/*
	 * A capped frequency at or above the maximum (e.g. a boost value)
	 * means no thermal pressure.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

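/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */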
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

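		/*
		 * Weight the raw capacity by the CPU's current clock rate so
		 * that early boot capacities reflect differing boot
		 * frequencies. Without a CPU clock, the initial freq_factor
		 * of 1 is kept.
		 */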
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(acpi_disabled || !acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	topology_normalize_cpu_scale();
	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

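	/*
	 * On ACPI-based systems, or when no DT capacity values were parsed,
	 * the cpufreq notifier is not needed for capacity initialization.
	 */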
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
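/*
 * Map a cpu-map "cpu" phandle to a logical CPU number and parse its capacity.
 * Returns the logical CPU number, -ENODEV if the DT node exists but has no
 * matching possible CPU, or -1 if the phandle is missing.
 */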
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

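	/*
	 * First check for child clusters; the scheduler is presented with a
	 * flat list of them, and nesting beyond one level is only warned
	 * about, not modelled.
	 */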
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, package_id, i, depth + 1);
			if (depth > 0)
				pr_warn("Topology for clusters of clusters not yet supported\n");
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, cluster_id,
						 core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	struct device_node *c;
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		c = of_get_child_by_name(socket, name);
		if (c) {
			has_socket = true;
			ret = parse_cluster(c, package_id, -1, 0);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		package_id++;
	} while (c);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

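	/*
	 * When topology is provided, cpu-map is essentially a root cluster
	 * with restricted subnodes.
	 */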
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_socket(map);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

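	/*
	 * Check that all cores are in the topology; the SMP code will only
	 * mark cores described in the DT as possible.
	 */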
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			ret = -EINVAL;
			break;
		}

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

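/*
 * cpu topology table
 */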
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, so use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * take care of removing the redundant layer.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() from spanning more than, or the same
	 * CPUs as, cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);

	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information.
		 */
		reset_cpu_topology();
		return;
	}
}
#endif