// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	int group_id;                  /* id of the group of threads that share this cache */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode, int group_id)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	cache->group_id = group_id;
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level,
			       struct device_node *ofnode, int group_id)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode, group_id);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

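/* Mark @cpu as sharing every cache on the local chain headed by @cache;
 * each bit set here is cleared again in cache_cpu_clear(). */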
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

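/* Read this cache type's size property (e.g. "cache-size" or
 * "d-cache-size") from the device tree. */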
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode &&
		    iter->group_id == cache->group_id &&
		    iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node and thread-group-id */
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
						int group_id)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node ||
		    iter->group_id != group_id)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * use cache-size, etc. for the unified cache size, but open firmware systems
 * use d-cache-size, etc.  Check on initialization for which type we have, and
 * return the appropriate structure type.  Assume it's embedded if it isn't
 * open firmware.  If it's yet a 3rd type, then there will be missing entries
 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be changed to detect the cache type.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
						  int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node, group_id);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, group_id, level);
	else
		cache = cache_do_one_devnode_split(node, group_id, level);

	return cache;
}

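/* Find an existing cache object for @node/@group_id, or create one (a
 * data/instruction pair for split caches) if none is on cache_list yet. */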
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int group_id,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node_group(node, group_id);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, group_id, level);

	return cache;
}

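/* Append @bigger to the local chain headed by @smaller, unless it is
 * already on the chain. */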
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return;
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

/*
 * If sub-groups of threads in a core containing @cpu_id share the
 * L@level-cache (information obtained via "ibm,thread-groups"
 * device-tree property), then we identify the group by the first
 * thread-sibling in the group. We define this to be the group-id.
 *
 * In the absence of any thread-group information for L@level-cache,
 * this function returns -1.
 */
static int get_group_id(unsigned int cpu_id, int level)
{
	if (has_big_cores && level == 1)
		return cpumask_first(per_cpu(thread_group_l1_cache_map,
					     cpu_id));
	else if (thread_group_shares_l2 && level == 2)
		return cpumask_first(per_cpu(thread_group_l2_cache_map,
					     cpu_id));
	else if (thread_group_shares_l3 && level == 3)
		return cpumask_first(per_cpu(thread_group_l3_cache_map,
					     cpu_id));
	return -1;
}

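/* Walk the hierarchy below @cache by following the device tree's
 * next-level-cache links, instantiating and chaining each subsidiary
 * cache as we go. */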
static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;
		int group_id;

		level++;
		group_id = get_group_id(cpu_id, level);
		subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

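/* Build the local cache chain for @cpu_id starting from its L1, then
 * mark the CPU in every cache on the chain. */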
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;
	int group_id;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	group_id = get_group_id(cpu_id, 1);

	cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache, cpu_id);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

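/* Create the "cache" kobject under .../cpuN/ and record it in
 * cache_dir_pcpu for later teardown. */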
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	mask = &cache->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_groups.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache_index_default);

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_groups = cache_index_default_groups,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

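/* Create an indexN directory for each cache on the CPU's local chain. */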
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

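/* Hotplug entry point: build this CPU's cache objects and populate sysfs. */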
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */
#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;
	int group_id;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	group_id = get_group_id(cpu_id, 1);
	cache = cache_lookup_by_node_group(cpu_node, group_id);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

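/* Hotplug exit point: remove this CPU's sysfs entries, then drop it
 * from each cache on its chain, freeing caches no online CPU uses. */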
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */