/* Source: OSCL-LXR web listing of arch/mips/kernel/cacheinfo.c (navigation chrome removed) */

0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * MIPS cacheinfo support
0004  */
0005 #include <linux/cacheinfo.h>
0006 
/*
 * Fill one struct cacheinfo leaf from the cache descriptor named by
 * @cache (dcache/icache/vcache/scache/tcache), then advance @leaf to
 * the next entry in the info_list.
 *
 * NOTE: this macro expands against a local variable 'c'
 * (struct cpuinfo_mips *) that must be in scope at the call site, and
 * @cache is pasted in as a field name, not evaluated as an expression.
 * @leaf is evaluated multiple times and incremented — callers must pass
 * a plain pointer variable, which all current call sites do.
 */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)
0019 
0020 int init_cache_level(unsigned int cpu)
0021 {
0022     struct cpuinfo_mips *c = &current_cpu_data;
0023     struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
0024     int levels = 0, leaves = 0;
0025 
0026     /*
0027      * If Dcache is not set, we assume the cache structures
0028      * are not properly initialized.
0029      */
0030     if (c->dcache.waysize)
0031         levels += 1;
0032     else
0033         return -ENOENT;
0034 
0035 
0036     leaves += (c->icache.waysize) ? 2 : 1;
0037 
0038     if (c->vcache.waysize) {
0039         levels++;
0040         leaves++;
0041     }
0042 
0043     if (c->scache.waysize) {
0044         levels++;
0045         leaves++;
0046     }
0047 
0048     if (c->tcache.waysize) {
0049         levels++;
0050         leaves++;
0051     }
0052 
0053     this_cpu_ci->num_levels = levels;
0054     this_cpu_ci->num_leaves = leaves;
0055     return 0;
0056 }
0057 
0058 static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
0059 {
0060     int cpu1;
0061 
0062     for_each_possible_cpu(cpu1)
0063         if (cpus_are_siblings(cpu, cpu1))
0064             cpumask_set_cpu(cpu1, cpu_map);
0065 }
0066 
0067 static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
0068 {
0069     int cpu1;
0070     int cluster = cpu_cluster(&cpu_data[cpu]);
0071 
0072     for_each_possible_cpu(cpu1)
0073         if (cpu_cluster(&cpu_data[cpu1]) == cluster)
0074             cpumask_set_cpu(cpu1, cpu_map);
0075 }
0076 
0077 int populate_cache_leaves(unsigned int cpu)
0078 {
0079     struct cpuinfo_mips *c = &current_cpu_data;
0080     struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
0081     struct cacheinfo *this_leaf = this_cpu_ci->info_list;
0082     int level = 1;
0083 
0084     if (c->icache.waysize) {
0085         /* I/D caches are per core */
0086         fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
0087         populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
0088         fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
0089         populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
0090         level++;
0091     } else {
0092         populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
0093         level++;
0094     }
0095 
0096     if (c->vcache.waysize) {
0097         /* Vcache is per core as well */
0098         fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
0099         populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
0100         level++;
0101     }
0102 
0103     if (c->scache.waysize) {
0104         /* Scache is per cluster */
0105         fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
0106         populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
0107         level++;
0108     }
0109 
0110     if (c->tcache.waysize)
0111         populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
0112 
0113     this_cpu_ci->cpu_map_populated = true;
0114 
0115     return 0;
0116 }