Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * cppc.c: CPPC Interface for x86
0004  * Copyright (c) 2016, Intel Corporation.
0005  */
0006 
0007 #include <acpi/cppc_acpi.h>
0008 #include <asm/msr.h>
0009 #include <asm/processor.h>
0010 #include <asm/topology.h>
0011 
0012 /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
0013 
0014 bool cpc_supported_by_cpu(void)
0015 {
0016     switch (boot_cpu_data.x86_vendor) {
0017     case X86_VENDOR_AMD:
0018     case X86_VENDOR_HYGON:
0019         if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
0020             (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
0021             return true;
0022         else if (boot_cpu_data.x86 == 0x17 &&
0023              boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
0024             return true;
0025         return boot_cpu_has(X86_FEATURE_CPPC);
0026     }
0027     return false;
0028 }
0029 
/*
 * FFH (Functional Fixed Hardware) access is always available on x86:
 * CPPC registers in FFH space are serviced via the MSR-based
 * cpc_read_ffh()/cpc_write_ffh() helpers below.
 */
bool cpc_ffh_supported(void)
{
    return true;
}
0034 
0035 int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
0036 {
0037     int err;
0038 
0039     err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
0040     if (!err) {
0041         u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
0042                        reg->bit_offset);
0043 
0044         *val &= mask;
0045         *val >>= reg->bit_offset;
0046     }
0047     return err;
0048 }
0049 
0050 int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
0051 {
0052     u64 rd_val;
0053     int err;
0054 
0055     err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
0056     if (!err) {
0057         u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
0058                        reg->bit_offset);
0059 
0060         val <<= reg->bit_offset;
0061         val &= mask;
0062         rd_val &= ~mask;
0063         rd_val |= val;
0064         err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
0065     }
0066     return err;
0067 }
0068 
0069 static void amd_set_max_freq_ratio(void)
0070 {
0071     struct cppc_perf_caps perf_caps;
0072     u64 highest_perf, nominal_perf;
0073     u64 perf_ratio;
0074     int rc;
0075 
0076     rc = cppc_get_perf_caps(0, &perf_caps);
0077     if (rc) {
0078         pr_debug("Could not retrieve perf counters (%d)\n", rc);
0079         return;
0080     }
0081 
0082     highest_perf = amd_get_highest_perf();
0083     nominal_perf = perf_caps.nominal_perf;
0084 
0085     if (!highest_perf || !nominal_perf) {
0086         pr_debug("Could not retrieve highest or nominal performance\n");
0087         return;
0088     }
0089 
0090     perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
0091     /* midpoint between max_boost and max_P */
0092     perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
0093     if (!perf_ratio) {
0094         pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
0095         return;
0096     }
0097 
0098     freq_invariance_set_perf_ratio(perf_ratio, false);
0099 }
0100 
/* Serializes the one-time setup done in init_freq_invariance_cppc(). */
static DEFINE_MUTEX(freq_invariance_lock);
0102 
0103 void init_freq_invariance_cppc(void)
0104 {
0105     static bool init_done;
0106 
0107     if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
0108         return;
0109 
0110     if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
0111         return;
0112 
0113     mutex_lock(&freq_invariance_lock);
0114     if (!init_done)
0115         amd_set_max_freq_ratio();
0116     init_done = true;
0117     mutex_unlock(&freq_invariance_lock);
0118 }