// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches and
	 * system-wide shared caches for all other levels. This is used
	 * only if arch specific code has not populated shared_cpu_map.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
		return !(this_leaf->level == 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

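/*
 * A last-level cache leaf is considered valid when firmware has provided
 * either a cache ID or a fw_token for it, so that it can be compared
 * against the leaves of other CPUs.
 */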
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NOCACHE at this stage, it should be unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

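/*
 * Walk the device tree cache node chain for this CPU and fill in the
 * properties of each cache leaf; the matched node is also recorded as
 * fw_token so that leaves can later be compared across CPUs.
 */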
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu))
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	return ret;
}

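/*
 * Build shared_cpu_map for every cache leaf of this CPU by comparing each
 * leaf with the same-index leaf of all other online CPUs, and track the
 * largest coherency line size seen in coherency_max_size.
 */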
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up the cache properties if the LLC is already valid;
	 * only the shared cpu_map needs updating when the cache attributes
	 * were populated early, before all the CPUs were brought online.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sib_leaf = per_cpu_cacheinfo_idx(i, index);
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}

		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue;

			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	/*
	 * Early detection via this function is allowed, and it also gets
	 * called as a CPU hotplug callback via cacheinfo_cpu_online(). If
	 * the cacheinfo has already been allocated, skip the initialisation
	 * and only update the shared CPU maps.
	 */
	if (per_cpu_cacheinfo(cpu))
		goto update_cpu_map;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (per_cpu_cacheinfo(cpu) == NULL) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and attributes, or it may leave them only partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

update_cpu_map:
	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map are set up here only if they have not been
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size)
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL,
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

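/*
 * CPU hotplug callbacks: detect the cache attributes and register the
 * sysfs interface when a CPU comes online, and tear both down again
 * before it goes offline.
 */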
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);