0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #define SWSMU_CODE_LAYER_L2
0025
0026 #include "amdgpu.h"
0027 #include "amdgpu_smu.h"
0028 #include "smu_v11_0.h"
0029 #include "smu11_driver_if_vangogh.h"
0030 #include "vangogh_ppt.h"
0031 #include "smu_v11_5_ppsmc.h"
0032 #include "smu_v11_5_pmfw.h"
0033 #include "smu_cmn.h"
0034 #include "soc15_common.h"
0035 #include "asic_reg/gc/gc_10_3_0_offset.h"
0036 #include "asic_reg/gc/gc_10_3_0_sh_mask.h"
0037 #include <asm/processor.h>
0038
0039
0040
0041
0042
0043
0044 #undef pr_err
0045 #undef pr_warn
0046 #undef pr_info
0047 #undef pr_debug
0048
0049
0050
0051
0052 #define mmSMUIO_GFX_MISC_CNTL 0x00c5
0053 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
0054
0055
0056 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
0057 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
0058 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
0059 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
0060
0061 #define FEATURE_MASK(feature) (1ULL << feature)
0062 #define SMC_DPM_FEATURE ( \
0063 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
0064 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
0065 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
0066 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
0067 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
0068 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
0069 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
0070 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
0071 FEATURE_MASK(FEATURE_GFX_DPM_BIT))
0072
/*
 * Generic SMU message -> Van Gogh PPSMC message ID mapping.
 * The trailing 0 is the per-message flag argument of MSG_MAP (unused here;
 * presumably the valid-in-virtualization flag — confirm against smu_cmn.h).
 */
static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,		0),
	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,		0),
	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			0),
	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,		0),
	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		0),
	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(RlcPowerNotify,                 PPSMC_MSG_RlcPowerNotify,		0),
	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		0),
	MSG_MAP(SetSoftMinGfxclk,               PPSMC_MSG_SetSoftMinGfxclk,		0),
	MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,		0),
	MSG_MAP(SetHardMinIspiclkByFreq,        PPSMC_MSG_SetHardMinIspiclkByFreq,	0),
	MSG_MAP(SetHardMinIspxclkByFreq,        PPSMC_MSG_SetHardMinIspxclkByFreq,	0),
	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		0),
	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	0),
	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	0),
	MSG_MAP(SetSoftMinFclk,                 PPSMC_MSG_SetSoftMinFclk,		0),
	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		0),
	MSG_MAP(EnablePostCode,                 PPSMC_MSG_EnablePostCode,		0),
	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,		0),
	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		0),
	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		0),
	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		0),
	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	0),
	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		0),
	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,		0),
	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	0),
	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		0),
	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	0),
	MSG_MAP(PowerUpCvip,                    PPSMC_MSG_PowerUpCvip,			0),
	MSG_MAP(PowerDownCvip,                  PPSMC_MSG_PowerDownCvip,		0),
	MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetThermalLimit,                PPSMC_MSG_GetThermalLimit,		0),
	MSG_MAP(GetCurrentTemperature,          PPSMC_MSG_GetCurrentTemperature,	0),
	MSG_MAP(GetCurrentPower,                PPSMC_MSG_GetCurrentPower,		0),
	MSG_MAP(GetCurrentVoltage,              PPSMC_MSG_GetCurrentVoltage,		0),
	MSG_MAP(GetCurrentCurrent,              PPSMC_MSG_GetCurrentCurrent,		0),
	MSG_MAP(GetAverageCpuActivity,          PPSMC_MSG_GetAverageCpuActivity,	0),
	MSG_MAP(GetAverageGfxActivity,          PPSMC_MSG_GetAverageGfxActivity,	0),
	MSG_MAP(GetAveragePower,                PPSMC_MSG_GetAveragePower,		0),
	MSG_MAP(GetAverageTemperature,          PPSMC_MSG_GetAverageTemperature,	0),
	MSG_MAP(SetAveragePowerTimeConstant,    PPSMC_MSG_SetAveragePowerTimeConstant,	0),
	MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0),
	MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0),
	MSG_MAP(SetMitigationEndHysteresis,     PPSMC_MSG_SetMitigationEndHysteresis,	0),
	MSG_MAP(GetCurrentFreq,                 PPSMC_MSG_GetCurrentFreq,		0),
	MSG_MAP(SetReducedPptLimit,             PPSMC_MSG_SetReducedPptLimit,		0),
	MSG_MAP(SetReducedThermalLimit,         PPSMC_MSG_SetReducedThermalLimit,	0),
	MSG_MAP(DramLogSetDramAddr,             PPSMC_MSG_DramLogSetDramAddr,		0),
	MSG_MAP(StartDramLogging,               PPSMC_MSG_StartDramLogging,		0),
	MSG_MAP(StopDramLogging,                PPSMC_MSG_StopDramLogging,		0),
	MSG_MAP(SetSoftMinCclk,                 PPSMC_MSG_SetSoftMinCclk,		0),
	MSG_MAP(SetSoftMaxCclk,                 PPSMC_MSG_SetSoftMaxCclk,		0),
	MSG_MAP(RequestActiveWgp,               PPSMC_MSG_RequestActiveWgp,		0),
	MSG_MAP(SetFastPPTLimit,                PPSMC_MSG_SetFastPPTLimit,		0),
	MSG_MAP(SetSlowPPTLimit,                PPSMC_MSG_SetSlowPPTLimit,		0),
	MSG_MAP(GetFastPPTLimit,                PPSMC_MSG_GetFastPPTLimit,		0),
	MSG_MAP(GetSlowPPTLimit,                PPSMC_MSG_GetSlowPPTLimit,		0),
};
0142
/*
 * Generic SMU feature bit -> Van Gogh firmware feature bit mapping.
 * FEA_MAP_REVERSE / FEA_MAP_HALF_REVERSE select alternative macro spellings
 * of the ASIC-side bit name (defined in smu_cmn.h).
 */
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(S0I2),
	FEA_MAP(SMU_LOW_POWER),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(OS_CSTATES),
	FEA_MAP(ISP_DPM),
	FEA_MAP(A55_DPM),
	FEA_MAP(CVIP_DSP_DPM),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_HALF_REVERSE(GFX),
};
0189
/* Generic SMU table ID -> Van Gogh firmware table ID mapping. */
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};
0196
/* Power-profile -> firmware workload bit mapping (used by ActiveProcessNotify). */
static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,	WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,	WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,		WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,	WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,	WORKLOAD_PPLIB_CUSTOM_BIT),
};
0204
/*
 * Firmware throttler-status bit position -> generic SMU throttler bit.
 * Indexed by the bit number reported in the metrics ThrottlerStatus field.
 */
static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL]	= (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT]	= (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT]	= (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU]	= (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE]	= (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC]	= (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD]	= (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC]	= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX]	= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_STATUS_BIT_TDC_CVIP]	= (SMU_THROTTLER_TDC_CVIP_BIT),
};
0218
0219 static int vangogh_tables_init(struct smu_context *smu)
0220 {
0221 struct smu_table_context *smu_table = &smu->smu_table;
0222 struct smu_table *tables = smu_table->tables;
0223 struct amdgpu_device *adev = smu->adev;
0224 uint32_t if_version;
0225 uint32_t ret = 0;
0226
0227 ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
0228 if (ret) {
0229 dev_err(adev->dev, "Failed to get smu if version!\n");
0230 goto err0_out;
0231 }
0232
0233 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
0234 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
0235 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
0236 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
0237 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
0238 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
0239 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
0240 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
0241
0242 if (if_version < 0x3) {
0243 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
0244 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
0245 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
0246 } else {
0247 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
0248 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
0249 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
0250 }
0251 if (!smu_table->metrics_table)
0252 goto err0_out;
0253 smu_table->metrics_time = 0;
0254
0255 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
0256 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
0257 if (!smu_table->gpu_metrics_table)
0258 goto err1_out;
0259
0260 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
0261 if (!smu_table->watermarks_table)
0262 goto err2_out;
0263
0264 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
0265 if (!smu_table->clocks_table)
0266 goto err3_out;
0267
0268 return 0;
0269
0270 err3_out:
0271 kfree(smu_table->watermarks_table);
0272 err2_out:
0273 kfree(smu_table->gpu_metrics_table);
0274 err1_out:
0275 kfree(smu_table->metrics_table);
0276 err0_out:
0277 return -ENOMEM;
0278 }
0279
/*
 * Read one field from the cached legacy (interface version < 0x3) metrics
 * table into @value.
 *
 * smu_cmn_get_metrics_table() refreshes smu_table->metrics_table from the
 * firmware when the cached copy is stale (bypass=false); the requested
 * member is then translated from the firmware layout.
 *
 * Unrecognized members store UINT_MAX in @value but still return 0.
 */
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
					       MetricsMember_t member,
					       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		/* legacy firmware appears to report activity scaled x100 —
		 * divided down to percent here; TODO confirm against PMFW docs */
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): <<8 then /1000 looks like a fixed-point power
		 * conversion — verify units against the firmware interface */
		*value = (metrics->CurrentSocketPower << 8) /
		1000 ;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* firmware value is divided by 100 before scaling to the
		 * driver's internal temperature units */
		*value = metrics->GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* copies cpu_core_num uint16_t entries — caller must supply
		 * a buffer large enough for all cores, not a single u32 */
		memcpy(value, &metrics->CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
0348
/*
 * Read one field from the cached current-layout (interface version >= 0x3)
 * metrics table into @value.
 *
 * Same contract as the legacy variant, but the new SmuMetrics_t nests the
 * instantaneous sample under the "Current" member. Note GfxActivity is NOT
 * divided by 100 here, unlike the legacy path — the new firmware layout
 * evidently reports it in percent already (per this code; not verified here).
 *
 * Unrecognized members store UINT_MAX in @value but still return 0.
 */
static int vangogh_get_smu_metrics_data(struct smu_context *smu,
					MetricsMember_t member,
					uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->Current.GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->Current.SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->Current.VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->Current.DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->Current.MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->Current.GfxActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->Current.UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): <<8 then /1000 — same fixed-point power
		 * conversion as the legacy path; verify units against PMFW */
		*value = (metrics->Current.CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* firmware value divided by 100 before scaling to the
		 * driver's internal temperature units */
		*value = metrics->Current.GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->Current.SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->Current.ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Current.Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Current.Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* copies cpu_core_num uint16_t entries — caller must supply
		 * a buffer large enough for all cores, not a single u32 */
		memcpy(value, &metrics->Current.CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
0417
0418 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
0419 MetricsMember_t member,
0420 uint32_t *value)
0421 {
0422 struct amdgpu_device *adev = smu->adev;
0423 uint32_t if_version;
0424 int ret = 0;
0425
0426 ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
0427 if (ret) {
0428 dev_err(adev->dev, "Failed to get smu if version!\n");
0429 return ret;
0430 }
0431
0432 if (if_version < 0x3)
0433 ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
0434 else
0435 ret = vangogh_get_smu_metrics_data(smu, member, value);
0436
0437 return ret;
0438 }
0439
0440 static int vangogh_allocate_dpm_context(struct smu_context *smu)
0441 {
0442 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
0443
0444 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
0445 GFP_KERNEL);
0446 if (!smu_dpm->dpm_context)
0447 return -ENOMEM;
0448
0449 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
0450
0451 return 0;
0452 }
0453
0454 static int vangogh_init_smc_tables(struct smu_context *smu)
0455 {
0456 int ret = 0;
0457
0458 ret = vangogh_tables_init(smu);
0459 if (ret)
0460 return ret;
0461
0462 ret = vangogh_allocate_dpm_context(smu);
0463 if (ret)
0464 return ret;
0465
0466 #ifdef CONFIG_X86
0467
0468 smu->cpu_core_num = boot_cpu_data.x86_max_cores;
0469 #else
0470 smu->cpu_core_num = 4;
0471 #endif
0472
0473 return smu_v11_0_init_smc_tables(smu);
0474 }
0475
0476 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
0477 {
0478 int ret = 0;
0479
0480 if (enable) {
0481
0482 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
0483 if (ret)
0484 return ret;
0485 } else {
0486 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
0487 if (ret)
0488 return ret;
0489 }
0490
0491 return ret;
0492 }
0493
0494 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
0495 {
0496 int ret = 0;
0497
0498 if (enable) {
0499 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
0500 if (ret)
0501 return ret;
0502 } else {
0503 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
0504 if (ret)
0505 return ret;
0506 }
0507
0508 return ret;
0509 }
0510
0511 static bool vangogh_is_dpm_running(struct smu_context *smu)
0512 {
0513 struct amdgpu_device *adev = smu->adev;
0514 int ret = 0;
0515 uint64_t feature_enabled;
0516
0517
0518 if (adev->in_suspend)
0519 return false;
0520
0521 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
0522
0523 if (ret)
0524 return false;
0525
0526 return !!(feature_enabled & SMC_DPM_FEATURE);
0527 }
0528
0529 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
0530 uint32_t dpm_level, uint32_t *freq)
0531 {
0532 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
0533
0534 if (!clk_table || clk_type >= SMU_CLK_COUNT)
0535 return -EINVAL;
0536
0537 switch (clk_type) {
0538 case SMU_SOCCLK:
0539 if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
0540 return -EINVAL;
0541 *freq = clk_table->SocClocks[dpm_level];
0542 break;
0543 case SMU_VCLK:
0544 if (dpm_level >= clk_table->VcnClkLevelsEnabled)
0545 return -EINVAL;
0546 *freq = clk_table->VcnClocks[dpm_level].vclk;
0547 break;
0548 case SMU_DCLK:
0549 if (dpm_level >= clk_table->VcnClkLevelsEnabled)
0550 return -EINVAL;
0551 *freq = clk_table->VcnClocks[dpm_level].dclk;
0552 break;
0553 case SMU_UCLK:
0554 case SMU_MCLK:
0555 if (dpm_level >= clk_table->NumDfPstatesEnabled)
0556 return -EINVAL;
0557 *freq = clk_table->DfPstateTable[dpm_level].memclk;
0558
0559 break;
0560 case SMU_FCLK:
0561 if (dpm_level >= clk_table->NumDfPstatesEnabled)
0562 return -EINVAL;
0563 *freq = clk_table->DfPstateTable[dpm_level].fclk;
0564 break;
0565 default:
0566 return -EINVAL;
0567 }
0568
0569 return 0;
0570 }
0571
/*
 * Emit the sysfs listing of DPM levels for @clk_type using the legacy
 * (interface version < 0x3) metrics layout.
 *
 * OD_* cases print the overdrive min/max ranges (manual level only);
 * the real clock cases list every enabled level, marking the current
 * frequency with '*'. If the current frequency matches no table level,
 * it is appended as an extra starred line.
 *
 * Returns the number of bytes written to @buf, or a negative error code.
 */
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	/* fetch a fresh metrics snapshot to get current frequencies */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* First pass: OD ranges, or capture level count + current value. */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		/* FCLK is not in the legacy metrics — query the firmware directly */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	/* Second pass: emit one line per enabled level. */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;	/* skip unpopulated levels */
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current frequency between table levels: show it anyway */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}
0672
/*
 * Emit the sysfs listing of DPM levels for @clk_type using the current
 * (interface version >= 0x3) metrics layout.
 *
 * Same structure as the legacy variant, plus a SMU_GFXCLK/SMU_SCLK case
 * that synthesizes a 3-level view (min / current-or-standard / max) from
 * the overdrive limits, since gfxclk has no firmware level table here.
 *
 * Returns the number of bytes written to @buf, or a negative error code.
 */
static int vangogh_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	/* fetch a fresh metrics snapshot to get current frequencies */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* First pass: OD ranges, or capture level count + current value. */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		/* not in the metrics snapshot — query the firmware directly */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk also comes straight from the firmware */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret) {
			return ret;
		}
		break;
	default:
		break;
	}

	/* Second pass: emit the level listing. */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;	/* skip unpopulated levels */
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current frequency between table levels: show it anyway */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* synthesize min / middle / max levels from the OD limits */
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value  == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;	/* somewhere in between: star the middle line */
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
0799
0800 static int vangogh_common_print_clk_levels(struct smu_context *smu,
0801 enum smu_clk_type clk_type, char *buf)
0802 {
0803 struct amdgpu_device *adev = smu->adev;
0804 uint32_t if_version;
0805 int ret = 0;
0806
0807 ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
0808 if (ret) {
0809 dev_err(adev->dev, "Failed to get smu if version!\n");
0810 return ret;
0811 }
0812
0813 if (if_version < 0x3)
0814 ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
0815 else
0816 ret = vangogh_print_clk_levels(smu, clk_type, buf);
0817
0818 return ret;
0819 }
0820
0821 static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
0822 enum amd_dpm_forced_level level,
0823 uint32_t *vclk_mask,
0824 uint32_t *dclk_mask,
0825 uint32_t *mclk_mask,
0826 uint32_t *fclk_mask,
0827 uint32_t *soc_mask)
0828 {
0829 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
0830
0831 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
0832 if (mclk_mask)
0833 *mclk_mask = clk_table->NumDfPstatesEnabled - 1;
0834
0835 if (fclk_mask)
0836 *fclk_mask = clk_table->NumDfPstatesEnabled - 1;
0837
0838 if (soc_mask)
0839 *soc_mask = 0;
0840 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
0841 if (mclk_mask)
0842 *mclk_mask = 0;
0843
0844 if (fclk_mask)
0845 *fclk_mask = 0;
0846
0847 if (soc_mask)
0848 *soc_mask = 1;
0849
0850 if (vclk_mask)
0851 *vclk_mask = 1;
0852
0853 if (dclk_mask)
0854 *dclk_mask = 1;
0855 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
0856 if (mclk_mask)
0857 *mclk_mask = 0;
0858
0859 if (fclk_mask)
0860 *fclk_mask = 0;
0861
0862 if (soc_mask)
0863 *soc_mask = 1;
0864
0865 if (vclk_mask)
0866 *vclk_mask = 1;
0867
0868 if (dclk_mask)
0869 *dclk_mask = 1;
0870 }
0871
0872 return 0;
0873 }
0874
0875 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
0876 enum smu_clk_type clk_type)
0877 {
0878 enum smu_feature_mask feature_id = 0;
0879
0880 switch (clk_type) {
0881 case SMU_MCLK:
0882 case SMU_UCLK:
0883 case SMU_FCLK:
0884 feature_id = SMU_FEATURE_DPM_FCLK_BIT;
0885 break;
0886 case SMU_GFXCLK:
0887 case SMU_SCLK:
0888 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
0889 break;
0890 case SMU_SOCCLK:
0891 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
0892 break;
0893 case SMU_VCLK:
0894 case SMU_DCLK:
0895 feature_id = SMU_FEATURE_VCN_DPM_BIT;
0896 break;
0897 default:
0898 return true;
0899 }
0900
0901 if (!smu_cmn_feature_is_enabled(smu, feature_id))
0902 return false;
0903
0904 return true;
0905 }
0906
/*
 * Report the absolute min and/or max frequency for @clk_type.
 *
 * If DPM is disabled for the clock, both bounds collapse to the boot-up
 * value (boot values are stored in 10 KHz units per the /100 conversion
 * to MHz below — confirm against smu_v11_0 boot-value parsing).
 * Otherwise the max comes from the PROFILE_PEAK level masks and the min
 * from the same masks reused as minimum level indices.
 *
 * @min and @max are both optional (NULL to skip). Returns 0 on success.
 */
static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t *min,
					uint32_t *max)
{
	int ret = 0;
	uint32_t soc_mask;
	uint32_t vclk_mask;
	uint32_t dclk_mask;
	uint32_t mclk_mask;
	uint32_t fclk_mask;
	uint32_t clock_limit;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
		/* DPM off: the boot-up clock is the only available level */
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
	if (max) {
		/* PEAK profile masks select the highest usable level per clock */
		ret = vangogh_get_profiling_clk_mask(smu,
						     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
						     &vclk_mask,
						     &dclk_mask,
						     &mclk_mask,
						     &fclk_mask,
						     &soc_mask);
		if (ret)
			goto failed;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
	if (min) {
		/* NOTE(review): reuses the PEAK masks from the max branch as
		 * the minimum level indices (uninitialized if max was NULL) —
		 * matches the existing behavior; verify intent */
		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	/* reached on success too — ret is 0 in that case */
	return ret;
}
1034
1035 static int vangogh_get_power_profile_mode(struct smu_context *smu,
1036 char *buf)
1037 {
1038 uint32_t i, size = 0;
1039 int16_t workload_type = 0;
1040
1041 if (!buf)
1042 return -EINVAL;
1043
1044 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1045
1046
1047
1048
1049 workload_type = smu_cmn_to_asic_specific_index(smu,
1050 CMN2ASIC_MAPPING_WORKLOAD,
1051 i);
1052
1053 if (workload_type < 0)
1054 continue;
1055
1056 size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
1057 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1058 }
1059
1060 return size;
1061 }
1062
1063 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1064 {
1065 int workload_type, ret;
1066 uint32_t profile_mode = input[size];
1067
1068 if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1069 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
1070 return -EINVAL;
1071 }
1072
1073 if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
1074 profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
1075 return 0;
1076
1077
1078 workload_type = smu_cmn_to_asic_specific_index(smu,
1079 CMN2ASIC_MAPPING_WORKLOAD,
1080 profile_mode);
1081 if (workload_type < 0) {
1082 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
1083 profile_mode);
1084 return -EINVAL;
1085 }
1086
1087 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1088 1 << workload_type,
1089 NULL);
1090 if (ret) {
1091 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
1092 workload_type);
1093 return ret;
1094 }
1095
1096 smu->power_profile_mode = profile_mode;
1097
1098 return 0;
1099 }
1100
/*
 * vangogh_set_soft_freq_limited_range - clamp a clock domain to [min, max].
 *
 * For each supported domain, sends a hard-min message followed by a soft-max
 * message to the SMU. Silently succeeds when DPM for the domain is disabled.
 * Returns 0 on success or the first messaging error.
 */
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t min,
					uint32_t max)
{
	int ret = 0;

	/* Nothing to program if DPM is not enabled for this clock. */
	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinGfxClk,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxGfxClk,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinFclkByFreq,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxFclkByFreq,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinSocclkByFreq,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxSocclkByFreq,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		/*
		 * NOTE(review): the VCLK value is shifted into the upper 16
		 * bits of the SetHardMinVcn/SetSoftMaxVcn payload while the
		 * DCLK case below passes the value unshifted — presumably the
		 * message packs vclk in [31:16] and dclk in [15:0]; confirm
		 * against the PMFW message spec.
		 */
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min << 16, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max << 16, NULL);
		if (ret)
			return ret;
		break;
	case SMU_DCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
1182
/*
 * vangogh_force_clk_levels - pin a clock domain to the DPM levels in @mask.
 *
 * The lowest and highest set bits of @mask select the min and max DPM levels;
 * the matching frequencies are looked up in the DPM table and programmed as
 * hard-min / soft-max. Unknown clock types are silently ignored.
 */
static int vangogh_force_clk_levels(struct smu_context *smu,
				enum smu_clk_type clk_type, uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0;

	/* lowest/highest set bit -> min/max DPM level index */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						soft_max_level, &max_freq);
		if (ret)
			return ret;
		/* soft-max is sent before hard-min for SOCCLK/FCLK */
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxSocclkByFreq,
								max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinSocclkByFreq,
								min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxFclkByFreq,
								max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinFclkByFreq,
								min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;

		/*
		 * NOTE(review): VCLK payloads are shifted into the upper 16
		 * bits of the Vcn messages (DCLK uses the lower half) —
		 * mirrors vangogh_set_soft_freq_limited_range.
		 */
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinVcn,
								min_freq << 16, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxVcn,
								max_freq << 16, NULL);
		if (ret)
			return ret;

		break;
	case SMU_DCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinVcn,
								min_freq, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxVcn,
								max_freq, NULL);
		if (ret)
			return ret;

		break;
	default:
		break;
	}

	return ret;
}
1289
1290 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
1291 {
1292 int ret = 0, i = 0;
1293 uint32_t min_freq, max_freq, force_freq;
1294 enum smu_clk_type clk_type;
1295
1296 enum smu_clk_type clks[] = {
1297 SMU_SOCCLK,
1298 SMU_VCLK,
1299 SMU_DCLK,
1300 SMU_FCLK,
1301 };
1302
1303 for (i = 0; i < ARRAY_SIZE(clks); i++) {
1304 clk_type = clks[i];
1305 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1306 if (ret)
1307 return ret;
1308
1309 force_freq = highest ? max_freq : min_freq;
1310 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
1311 if (ret)
1312 return ret;
1313 }
1314
1315 return ret;
1316 }
1317
1318 static int vangogh_unforce_dpm_levels(struct smu_context *smu)
1319 {
1320 int ret = 0, i = 0;
1321 uint32_t min_freq, max_freq;
1322 enum smu_clk_type clk_type;
1323
1324 struct clk_feature_map {
1325 enum smu_clk_type clk_type;
1326 uint32_t feature;
1327 } clk_feature_map[] = {
1328 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
1329 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
1330 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
1331 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
1332 };
1333
1334 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
1335
1336 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
1337 continue;
1338
1339 clk_type = clk_feature_map[i].clk_type;
1340
1341 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1342
1343 if (ret)
1344 return ret;
1345
1346 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1347
1348 if (ret)
1349 return ret;
1350 }
1351
1352 return ret;
1353 }
1354
1355 static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
1356 {
1357 int ret = 0;
1358 uint32_t socclk_freq = 0, fclk_freq = 0;
1359 uint32_t vclk_freq = 0, dclk_freq = 0;
1360
1361 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
1362 if (ret)
1363 return ret;
1364
1365 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
1366 if (ret)
1367 return ret;
1368
1369 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
1370 if (ret)
1371 return ret;
1372
1373 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
1374 if (ret)
1375 return ret;
1376
1377 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
1378 if (ret)
1379 return ret;
1380
1381 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
1382 if (ret)
1383 return ret;
1384
1385 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
1386 if (ret)
1387 return ret;
1388
1389 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
1390 if (ret)
1391 return ret;
1392
1393 return ret;
1394 }
1395
/*
 * vangogh_set_performance_level - apply a forced DPM performance level.
 *
 * Resets the CPU soft limits to their defaults, selects the GFX hard-min and
 * soft-max frequencies for @level, forces the non-GFX clocks as needed, then
 * programs the GFX limits and (on sufficiently new firmware) the per-core CPU
 * soft limits. Returns 0 on success or a negative errno.
 */
static int vangogh_set_performance_level(struct smu_context *smu,
					enum amd_dpm_forced_level level)
{
	int ret = 0, i;
	uint32_t soc_mask, mclk_mask, fclk_mask;
	uint32_t vclk_mask = 0, dclk_mask = 0;

	/* CPU limits always revert to defaults regardless of level. */
	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		/* Pin GFX to its default maximum on both ends. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;


		ret = vangogh_force_dpm_limit_value(smu, true);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		/* Pin GFX to its default minimum on both ends. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;

		ret = vangogh_force_dpm_limit_value(smu, false);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		/* Restore the full default GFX range and unforce clocks. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_unforce_dpm_levels(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;

		ret = vangogh_get_profiling_clk_mask(smu, level,
							&vclk_mask,
							&dclk_mask,
							&mclk_mask,
							&fclk_mask,
							&soc_mask);
		if (ret)
			return ret;

		/* Force each clock to its profiling level (errors ignored). */
		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		/* Only the mclk/fclk masks are needed for this level. */
		ret = vangogh_get_profiling_clk_mask(smu, level,
							NULL,
							NULL,
							&mclk_mask,
							&fclk_mask,
							NULL);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;

		ret = vangogh_set_peak_clock_by_device(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		/* Nothing to program for manual/exit levels. */
		return 0;
	}

	/* Commit the chosen GFX hard-min / soft-max to the SMU. */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
					      smu->gfx_actual_hard_min_freq, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      smu->gfx_actual_soft_max_freq, NULL);
	if (ret)
		return ret;

	/* Per-core CCLK soft limits require PMFW >= 0x43f1b00. */
	if (smu->adev->pm.fw_version >= 0x43f1b00) {
		for (i = 0; i < smu->cpu_core_num; i++) {
			/* Core index is packed into bits [31:20] of the arg. */
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret)
				return ret;

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}
1513
1514 static int vangogh_read_sensor(struct smu_context *smu,
1515 enum amd_pp_sensors sensor,
1516 void *data, uint32_t *size)
1517 {
1518 int ret = 0;
1519
1520 if (!data || !size)
1521 return -EINVAL;
1522
1523 switch (sensor) {
1524 case AMDGPU_PP_SENSOR_GPU_LOAD:
1525 ret = vangogh_common_get_smu_metrics_data(smu,
1526 METRICS_AVERAGE_GFXACTIVITY,
1527 (uint32_t *)data);
1528 *size = 4;
1529 break;
1530 case AMDGPU_PP_SENSOR_GPU_POWER:
1531 ret = vangogh_common_get_smu_metrics_data(smu,
1532 METRICS_AVERAGE_SOCKETPOWER,
1533 (uint32_t *)data);
1534 *size = 4;
1535 break;
1536 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1537 ret = vangogh_common_get_smu_metrics_data(smu,
1538 METRICS_TEMPERATURE_EDGE,
1539 (uint32_t *)data);
1540 *size = 4;
1541 break;
1542 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1543 ret = vangogh_common_get_smu_metrics_data(smu,
1544 METRICS_TEMPERATURE_HOTSPOT,
1545 (uint32_t *)data);
1546 *size = 4;
1547 break;
1548 case AMDGPU_PP_SENSOR_GFX_MCLK:
1549 ret = vangogh_common_get_smu_metrics_data(smu,
1550 METRICS_CURR_UCLK,
1551 (uint32_t *)data);
1552 *(uint32_t *)data *= 100;
1553 *size = 4;
1554 break;
1555 case AMDGPU_PP_SENSOR_GFX_SCLK:
1556 ret = vangogh_common_get_smu_metrics_data(smu,
1557 METRICS_CURR_GFXCLK,
1558 (uint32_t *)data);
1559 *(uint32_t *)data *= 100;
1560 *size = 4;
1561 break;
1562 case AMDGPU_PP_SENSOR_VDDGFX:
1563 ret = vangogh_common_get_smu_metrics_data(smu,
1564 METRICS_VOLTAGE_VDDGFX,
1565 (uint32_t *)data);
1566 *size = 4;
1567 break;
1568 case AMDGPU_PP_SENSOR_VDDNB:
1569 ret = vangogh_common_get_smu_metrics_data(smu,
1570 METRICS_VOLTAGE_VDDSOC,
1571 (uint32_t *)data);
1572 *size = 4;
1573 break;
1574 case AMDGPU_PP_SENSOR_CPU_CLK:
1575 ret = vangogh_common_get_smu_metrics_data(smu,
1576 METRICS_AVERAGE_CPUCLK,
1577 (uint32_t *)data);
1578 *size = smu->cpu_core_num * sizeof(uint16_t);
1579 break;
1580 default:
1581 ret = -EOPNOTSUPP;
1582 break;
1583 }
1584
1585 return ret;
1586 }
1587
1588 static int vangogh_set_watermarks_table(struct smu_context *smu,
1589 struct pp_smu_wm_range_sets *clock_ranges)
1590 {
1591 int i;
1592 int ret = 0;
1593 Watermarks_t *table = smu->smu_table.watermarks_table;
1594
1595 if (!table || !clock_ranges)
1596 return -EINVAL;
1597
1598 if (clock_ranges) {
1599 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1600 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1601 return -EINVAL;
1602
1603 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1604 table->WatermarkRow[WM_DCFCLK][i].MinClock =
1605 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1606 table->WatermarkRow[WM_DCFCLK][i].MaxClock =
1607 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1608 table->WatermarkRow[WM_DCFCLK][i].MinMclk =
1609 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1610 table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
1611 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1612
1613 table->WatermarkRow[WM_DCFCLK][i].WmSetting =
1614 clock_ranges->reader_wm_sets[i].wm_inst;
1615 }
1616
1617 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1618 table->WatermarkRow[WM_SOCCLK][i].MinClock =
1619 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1620 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1621 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1622 table->WatermarkRow[WM_SOCCLK][i].MinMclk =
1623 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1624 table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
1625 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1626
1627 table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1628 clock_ranges->writer_wm_sets[i].wm_inst;
1629 }
1630
1631 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1632 }
1633
1634
1635 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1636 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1637 ret = smu_cmn_write_watermarks_table(smu);
1638 if (ret) {
1639 dev_err(smu->adev->dev, "Failed to update WMTABLE!");
1640 return ret;
1641 }
1642 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1643 }
1644
1645 return 0;
1646 }
1647
/*
 * vangogh_get_legacy_gpu_metrics - fill a gpu_metrics_v2_2 record from the
 * legacy (pre-0x3 interface) SmuMetrics layout.
 *
 * Fetches a fresh metrics snapshot from the SMU, translates each field into
 * the common gpu_metrics_v2_2 structure and returns its size in bytes (or a
 * negative errno on fetch failure).
 */
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	/* 'true' requests a refresh of the cached metrics table. */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	/* only four CPU cores are reported in this layout */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	/* Power[]: 0 = CPU, 1 = SOC, 2 = GFX (per the mapping below) */
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	/* uclk and fclk both derive from MemclkFrequency on this ASIC */
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1704
/*
 * vangogh_get_gpu_metrics - fill a gpu_metrics_v2_2 record from the current
 * (interface >= 0x3) SmuMetrics layout, which splits Current vs Average data.
 *
 * Returns the record size in bytes, or a negative errno on fetch failure.
 */
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	/* 'true' requests a refresh of the cached metrics table. */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	/* only four CPU cores are reported in this layout */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	/* Power[]: 0 = CPU, 1 = SOC, 2 = GFX (per the mapping below) */
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	/* uclk and fclk both derive from MemclkFrequency on this ASIC */
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1768
1769 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
1770 void **table)
1771 {
1772 struct amdgpu_device *adev = smu->adev;
1773 uint32_t if_version;
1774 int ret = 0;
1775
1776 ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
1777 if (ret) {
1778 dev_err(adev->dev, "Failed to get smu if version!\n");
1779 return ret;
1780 }
1781
1782 if (if_version < 0x3)
1783 ret = vangogh_get_legacy_gpu_metrics(smu, table);
1784 else
1785 ret = vangogh_get_gpu_metrics(smu, table);
1786
1787 return ret;
1788 }
1789
1790 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
1791 long input[], uint32_t size)
1792 {
1793 int ret = 0;
1794 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1795
1796 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
1797 dev_warn(smu->adev->dev,
1798 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
1799 return -EINVAL;
1800 }
1801
1802 switch (type) {
1803 case PP_OD_EDIT_CCLK_VDDC_TABLE:
1804 if (size != 3) {
1805 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
1806 return -EINVAL;
1807 }
1808 if (input[0] >= smu->cpu_core_num) {
1809 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n",
1810 smu->cpu_core_num);
1811 }
1812 smu->cpu_core_id_select = input[0];
1813 if (input[1] == 0) {
1814 if (input[2] < smu->cpu_default_soft_min_freq) {
1815 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1816 input[2], smu->cpu_default_soft_min_freq);
1817 return -EINVAL;
1818 }
1819 smu->cpu_actual_soft_min_freq = input[2];
1820 } else if (input[1] == 1) {
1821 if (input[2] > smu->cpu_default_soft_max_freq) {
1822 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1823 input[2], smu->cpu_default_soft_max_freq);
1824 return -EINVAL;
1825 }
1826 smu->cpu_actual_soft_max_freq = input[2];
1827 } else {
1828 return -EINVAL;
1829 }
1830 break;
1831 case PP_OD_EDIT_SCLK_VDDC_TABLE:
1832 if (size != 2) {
1833 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1834 return -EINVAL;
1835 }
1836
1837 if (input[0] == 0) {
1838 if (input[1] < smu->gfx_default_hard_min_freq) {
1839 dev_warn(smu->adev->dev,
1840 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1841 input[1], smu->gfx_default_hard_min_freq);
1842 return -EINVAL;
1843 }
1844 smu->gfx_actual_hard_min_freq = input[1];
1845 } else if (input[0] == 1) {
1846 if (input[1] > smu->gfx_default_soft_max_freq) {
1847 dev_warn(smu->adev->dev,
1848 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1849 input[1], smu->gfx_default_soft_max_freq);
1850 return -EINVAL;
1851 }
1852 smu->gfx_actual_soft_max_freq = input[1];
1853 } else {
1854 return -EINVAL;
1855 }
1856 break;
1857 case PP_OD_RESTORE_DEFAULT_TABLE:
1858 if (size != 0) {
1859 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1860 return -EINVAL;
1861 } else {
1862 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1863 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1864 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
1865 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
1866 }
1867 break;
1868 case PP_OD_COMMIT_DPM_TABLE:
1869 if (size != 0) {
1870 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1871 return -EINVAL;
1872 } else {
1873 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
1874 dev_err(smu->adev->dev,
1875 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
1876 smu->gfx_actual_hard_min_freq,
1877 smu->gfx_actual_soft_max_freq);
1878 return -EINVAL;
1879 }
1880
1881 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1882 smu->gfx_actual_hard_min_freq, NULL);
1883 if (ret) {
1884 dev_err(smu->adev->dev, "Set hard min sclk failed!");
1885 return ret;
1886 }
1887
1888 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1889 smu->gfx_actual_soft_max_freq, NULL);
1890 if (ret) {
1891 dev_err(smu->adev->dev, "Set soft max sclk failed!");
1892 return ret;
1893 }
1894
1895 if (smu->adev->pm.fw_version < 0x43f1b00) {
1896 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
1897 break;
1898 }
1899
1900 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
1901 ((smu->cpu_core_id_select << 20)
1902 | smu->cpu_actual_soft_min_freq),
1903 NULL);
1904 if (ret) {
1905 dev_err(smu->adev->dev, "Set hard min cclk failed!");
1906 return ret;
1907 }
1908
1909 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
1910 ((smu->cpu_core_id_select << 20)
1911 | smu->cpu_actual_soft_max_freq),
1912 NULL);
1913 if (ret) {
1914 dev_err(smu->adev->dev, "Set soft max cclk failed!");
1915 return ret;
1916 }
1917 }
1918 break;
1919 default:
1920 return -ENOSYS;
1921 }
1922
1923 return ret;
1924 }
1925
1926 static int vangogh_set_default_dpm_tables(struct smu_context *smu)
1927 {
1928 struct smu_table_context *smu_table = &smu->smu_table;
1929
1930 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
1931 }
1932
1933 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
1934 {
1935 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
1936
1937 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
1938 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
1939 smu->gfx_actual_hard_min_freq = 0;
1940 smu->gfx_actual_soft_max_freq = 0;
1941
1942 smu->cpu_default_soft_min_freq = 1400;
1943 smu->cpu_default_soft_max_freq = 3500;
1944 smu->cpu_actual_soft_min_freq = 0;
1945 smu->cpu_actual_soft_max_freq = 0;
1946
1947 return 0;
1948 }
1949
1950 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
1951 {
1952 DpmClocks_t *table = smu->smu_table.clocks_table;
1953 int i;
1954
1955 if (!clock_table || !table)
1956 return -EINVAL;
1957
1958 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
1959 clock_table->SocClocks[i].Freq = table->SocClocks[i];
1960 clock_table->SocClocks[i].Vol = table->SocVoltage[i];
1961 }
1962
1963 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1964 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
1965 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
1966 }
1967
1968 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1969 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
1970 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
1971 }
1972
1973 return 0;
1974 }
1975
1976
1977 static int vangogh_system_features_control(struct smu_context *smu, bool en)
1978 {
1979 struct amdgpu_device *adev = smu->adev;
1980 int ret = 0;
1981
1982 if (adev->pm.fw_version >= 0x43f1700 && !en)
1983 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
1984 RLC_STATUS_OFF, NULL);
1985
1986 return ret;
1987 }
1988
/*
 * vangogh_post_smu_init - post-hw-init SMU configuration.
 *
 * Enables GFXOFF when GFX DPM and GFX power gating are both available
 * (otherwise strips PP_GFXOFF_MASK), then — if CUs are harvested — requests
 * WGP power save for half of the active CUs, unless the always-on WGP mask
 * already covers that many.
 */
static int vangogh_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tmp;
	int ret = 0;
	uint8_t aon_bits = 0;
	/* Two CUs in a WGP */
	uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* allow message will be sent after enable message on Vangogh*/
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
	   (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
		if (ret) {
			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
			return ret;
		}
	} else {
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
	}

	/* if all CUs are active, no harvesting: nothing further to request */
	if (total_cu == adev->gfx.cu_info.number)
		return 0;

	/*
	 * Calculate the total bits number of always on WGPs for all SA/SEs in
	 * RLC_PG_ALWAYS_ON_WGP_MASK.
	 */
	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;

	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* Do not request any WGPs less than set in the AON_WGP_MASK */
	if (aon_bits > req_active_wgps) {
		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
		return 0;
	} else {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
	}
}
2034
2035 static int vangogh_mode_reset(struct smu_context *smu, int type)
2036 {
2037 int ret = 0, index = 0;
2038
2039 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2040 SMU_MSG_GfxDeviceDriverReset);
2041 if (index < 0)
2042 return index == -EACCES ? 0 : index;
2043
2044 mutex_lock(&smu->message_lock);
2045
2046 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
2047
2048 mutex_unlock(&smu->message_lock);
2049
2050 mdelay(10);
2051
2052 return ret;
2053 }
2054
/* Convenience wrapper: issue a mode-2 GfxDeviceDriverReset. */
static int vangogh_mode2_reset(struct smu_context *smu)
{
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073 static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
2074 {
2075 struct amdgpu_device *adev = smu->adev;
2076 u32 reg, gfxoff_status;
2077
2078 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
2079 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
2080 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
2081
2082 return gfxoff_status;
2083 }
2084
/*
 * vangogh_get_power_limit - query the slow and fast PPT limits from the SMU.
 *
 * Requires PMFW >= 0x43f1e00; older firmware silently returns 0 with the
 * outputs untouched. Limits are reported by the SMU in mW and stored in W.
 */
static int vangogh_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit)
{
	struct smu_11_5_power_context *power_context =
									smu->smu_power.power_context;
	uint32_t ppt_limit;
	int ret = 0;

	/* PPT limit messages are only available on newer firmware. */
	if (smu->adev->pm.fw_version < 0x43f1e00)
		return ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatts to watts */
	if (current_power_limit)
		*current_power_limit = ppt_limit / 1000;
	if (default_power_limit)
		*default_power_limit = ppt_limit / 1000;
	if (max_power_limit)
		/* NOTE(review): fixed 29 W cap — presumably the platform
		 * slow-PPT ceiling; confirm against the board spec */
		*max_power_limit = 29;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
		return ret;
	}

	power_context->current_fast_ppt_limit =
			power_context->default_fast_ppt_limit = ppt_limit / 1000;
	/* NOTE(review): fixed 30 W fast-PPT cap — confirm against board spec */
	power_context->max_fast_ppt_limit = 30;

	return ret;
}
2123
2124 static int vangogh_get_ppt_limit(struct smu_context *smu,
2125 uint32_t *ppt_limit,
2126 enum smu_ppt_limit_type type,
2127 enum smu_ppt_limit_level level)
2128 {
2129 struct smu_11_5_power_context *power_context =
2130 smu->smu_power.power_context;
2131
2132 if (!power_context)
2133 return -EOPNOTSUPP;
2134
2135 if (type == SMU_FAST_PPT_LIMIT) {
2136 switch (level) {
2137 case SMU_PPT_LIMIT_MAX:
2138 *ppt_limit = power_context->max_fast_ppt_limit;
2139 break;
2140 case SMU_PPT_LIMIT_CURRENT:
2141 *ppt_limit = power_context->current_fast_ppt_limit;
2142 break;
2143 case SMU_PPT_LIMIT_DEFAULT:
2144 *ppt_limit = power_context->default_fast_ppt_limit;
2145 break;
2146 default:
2147 break;
2148 }
2149 }
2150
2151 return 0;
2152 }
2153
2154 static int vangogh_set_power_limit(struct smu_context *smu,
2155 enum smu_ppt_limit_type limit_type,
2156 uint32_t ppt_limit)
2157 {
2158 struct smu_11_5_power_context *power_context =
2159 smu->smu_power.power_context;
2160 int ret = 0;
2161
2162 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
2163 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
2164 return -EOPNOTSUPP;
2165 }
2166
2167 switch (limit_type) {
2168 case SMU_DEFAULT_PPT_LIMIT:
2169 ret = smu_cmn_send_smc_msg_with_param(smu,
2170 SMU_MSG_SetSlowPPTLimit,
2171 ppt_limit * 1000,
2172 NULL);
2173 if (ret)
2174 return ret;
2175
2176 smu->current_power_limit = ppt_limit;
2177 break;
2178 case SMU_FAST_PPT_LIMIT:
2179 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
2180 if (ppt_limit > power_context->max_fast_ppt_limit) {
2181 dev_err(smu->adev->dev,
2182 "New power limit (%d) is over the max allowed %d\n",
2183 ppt_limit, power_context->max_fast_ppt_limit);
2184 return ret;
2185 }
2186
2187 ret = smu_cmn_send_smc_msg_with_param(smu,
2188 SMU_MSG_SetFastPPTLimit,
2189 ppt_limit * 1000,
2190 NULL);
2191 if (ret)
2192 return ret;
2193
2194 power_context->current_fast_ppt_limit = ppt_limit;
2195 break;
2196 default:
2197 return -EINVAL;
2198 }
2199
2200 return ret;
2201 }
2202
2203 static const struct pptable_funcs vangogh_ppt_funcs = {
2204
2205 .check_fw_status = smu_v11_0_check_fw_status,
2206 .check_fw_version = smu_v11_0_check_fw_version,
2207 .init_smc_tables = vangogh_init_smc_tables,
2208 .fini_smc_tables = smu_v11_0_fini_smc_tables,
2209 .init_power = smu_v11_0_init_power,
2210 .fini_power = smu_v11_0_fini_power,
2211 .register_irq_handler = smu_v11_0_register_irq_handler,
2212 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
2213 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
2214 .send_smc_msg = smu_cmn_send_smc_msg,
2215 .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
2216 .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
2217 .is_dpm_running = vangogh_is_dpm_running,
2218 .read_sensor = vangogh_read_sensor,
2219 .get_enabled_mask = smu_cmn_get_enabled_mask,
2220 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2221 .set_watermarks_table = vangogh_set_watermarks_table,
2222 .set_driver_table_location = smu_v11_0_set_driver_table_location,
2223 .interrupt_work = smu_v11_0_interrupt_work,
2224 .get_gpu_metrics = vangogh_common_get_gpu_metrics,
2225 .od_edit_dpm_table = vangogh_od_edit_dpm_table,
2226 .print_clk_levels = vangogh_common_print_clk_levels,
2227 .set_default_dpm_table = vangogh_set_default_dpm_tables,
2228 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
2229 .system_features_control = vangogh_system_features_control,
2230 .feature_is_enabled = smu_cmn_feature_is_enabled,
2231 .set_power_profile_mode = vangogh_set_power_profile_mode,
2232 .get_power_profile_mode = vangogh_get_power_profile_mode,
2233 .get_dpm_clock_table = vangogh_get_dpm_clock_table,
2234 .force_clk_levels = vangogh_force_clk_levels,
2235 .set_performance_level = vangogh_set_performance_level,
2236 .post_init = vangogh_post_smu_init,
2237 .mode2_reset = vangogh_mode2_reset,
2238 .gfx_off_control = smu_v11_0_gfx_off_control,
2239 .get_gfx_off_status = vangogh_get_gfxoff_status,
2240 .get_ppt_limit = vangogh_get_ppt_limit,
2241 .get_power_limit = vangogh_get_power_limit,
2242 .set_power_limit = vangogh_set_power_limit,
2243 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
2244 };
2245
2246 void vangogh_set_ppt_funcs(struct smu_context *smu)
2247 {
2248 smu->ppt_funcs = &vangogh_ppt_funcs;
2249 smu->message_map = vangogh_message_map;
2250 smu->feature_map = vangogh_feature_mask_map;
2251 smu->table_map = vangogh_table_map;
2252 smu->workload_map = vangogh_workload_map;
2253 smu->is_apu = true;
2254 smu_v11_0_set_smu_mailbox_registers(smu);
2255 }