// SPDX-License-Identifier: GPL-2.0
/*
 * Extract CPU cache information and expose them via sysfs.
 *
 *    Copyright IBM Corp. 2012
 */

#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cacheinfo.h>
#include <asm/facility.h>

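/* Cache scope values reported in the topology information. */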
enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

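/* Cache type values reported in the topology information. */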
enum {
	CTYPE_SEPARATE,
	CTYPE_DATA,
	CTYPE_INSTRUCTION,
	CTYPE_UNIFIED,
};

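/* Attribute indications passed to ecag() to select what to extract. */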
enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

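/* Type indications passed to ecag(): unified and data share value 0. */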
enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};

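/* Scope and type of one cache level from the topology information. */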
struct cache_info {
	unsigned char       : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

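/* Topology information returned by ecag(): one cache_info per cache level. */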
#define CACHE_MAX_LEVEL 8
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

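/* Human readable cache type names, indexed by enum cache_type. */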
static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};

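/* Map topology cache types (CTYPE_*) to the generic enum cache_type. */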
static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

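/*
 * Print level, type, scope, size, line size and associativity of each
 * cache leaf of one online CPU into the given seq_file.
 */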
void show_cacheinfo(struct seq_file *m)
{
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *cache;
	int idx;

	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		cache = this_cpu_ci->info_list + idx;
		seq_printf(m, "cache%-11d: ", idx);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ",
			   cache->disable_sysfs ? "Shared" : "Private");
		seq_printf(m, "size=%dK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
		seq_puts(m, "\n");
	}
}

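/*
 * Return the generic cache type of the given level, or CACHE_TYPE_NOCACHE
 * if the level does not exist or is neither private nor shared.
 */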
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
	if (level >= CACHE_MAX_LEVEL)
		return CACHE_TYPE_NOCACHE;
	ci += level;
	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
		return CACHE_TYPE_NOCACHE;
	return cache_type_map[ci->type];
}

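/*
 * Query a cache attribute: combine the attribute indication (ai), level
 * indication (li) and type indication (ti) into the ECAG operand.
 */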
static inline unsigned long ecag(int ai, int li, int ti)
{
	return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}

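/*
 * Fill one cacheinfo leaf: line size, associativity and size are read via
 * ecag(), the number of sets is derived from them. Caches that are not
 * private are not exposed via sysfs.
 */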
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
			 enum cache_type type, unsigned int level, int cpu)
{
	int ti, num_sets;

	if (type == CACHE_TYPE_INST)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	this_leaf->level = level + 1;
	this_leaf->type = type;
	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
	num_sets = this_leaf->size / this_leaf->coherency_line_size;
	num_sets /= this_leaf->ways_of_associativity;
	this_leaf->number_of_sets = num_sets;
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (!private)
		this_leaf->disable_sysfs = true;
}

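/*
 * Count cache levels and leaves from the topology information; a level
 * with separate instruction and data caches contributes two leaves.
 */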
int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int level = 0, leaves = 0;
	union cache_topology ct;
	enum cache_type ctype;

	if (!this_cpu_ci)
		return -EINVAL;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	do {
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_NOCACHE)
			break;
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	} while (++level < CACHE_MAX_LEVEL);
	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

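/*
 * Initialize one cacheinfo leaf per data, instruction or unified cache
 * reported in the topology information.
 */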
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int level, idx, pvt;
	union cache_topology ct;
	enum cache_type ctype;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		if (!this_leaf)
			return -EINVAL;
		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
		} else {
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	return 0;
}