/* (Scraper residue from the OSCL-LXR source browser removed.) */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
0023 #include "pp_debug.h"
0024 #include <linux/types.h>
0025 #include <linux/kernel.h>
0026 #include <linux/slab.h>
0027 #include "atom-types.h"
0028 #include "atombios.h"
0029 #include "processpptables.h"
0030 #include "cgs_common.h"
0031 #include "smumgr.h"
0032 #include "hwmgr.h"
0033 #include "hardwaremanager.h"
0034 #include "rv_ppsmc.h"
0035 #include "smu10_hwmgr.h"
0036 #include "power_state.h"
0037 #include "soc15_common.h"
0038 #include "smu10.h"
0039 #include "asic_reg/pwr/pwr_10_0_offset.h"
0040 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
0041 
0042 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
0043 #define SMU10_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
0044 #define SCLK_MIN_DIV_INTV_SHIFT         12
0045 #define SMU10_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
0046 #define SMC_RAM_END                     0x40000
0047 
0048 static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
0049 
0050 
0051 static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
0052         struct pp_display_clock_request *clock_req)
0053 {
0054     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0055     enum amd_pp_clock_type clk_type = clock_req->clock_type;
0056     uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
0057     PPSMC_Msg        msg;
0058 
0059     switch (clk_type) {
0060     case amd_pp_dcf_clock:
0061         if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
0062             return 0;
0063         msg =  PPSMC_MSG_SetHardMinDcefclkByFreq;
0064         smu10_data->dcf_actual_hard_min_freq = clk_freq;
0065         break;
0066     case amd_pp_soc_clock:
0067          msg = PPSMC_MSG_SetHardMinSocclkByFreq;
0068         break;
0069     case amd_pp_f_clock:
0070         if (clk_freq == smu10_data->f_actual_hard_min_freq)
0071             return 0;
0072         smu10_data->f_actual_hard_min_freq = clk_freq;
0073         msg = PPSMC_MSG_SetHardMinFclkByFreq;
0074         break;
0075     default:
0076         pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
0077         return -EINVAL;
0078     }
0079     smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
0080 
0081     return 0;
0082 }
0083 
0084 static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
0085 {
0086     if (SMU10_Magic != hw_ps->magic)
0087         return NULL;
0088 
0089     return (struct smu10_power_state *)hw_ps;
0090 }
0091 
0092 static const struct smu10_power_state *cast_const_smu10_ps(
0093                 const struct pp_hw_power_state *hw_ps)
0094 {
0095     if (SMU10_Magic != hw_ps->magic)
0096         return NULL;
0097 
0098     return (struct smu10_power_state *)hw_ps;
0099 }
0100 
0101 static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
0102 {
0103     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0104 
0105     smu10_data->dce_slow_sclk_threshold = 30000;
0106     smu10_data->thermal_auto_throttling_treshold = 0;
0107     smu10_data->is_nb_dpm_enabled = 1;
0108     smu10_data->dpm_flags = 1;
0109     smu10_data->need_min_deep_sleep_dcefclk = true;
0110     smu10_data->num_active_display = 0;
0111     smu10_data->deep_sleep_dcefclk = 0;
0112 
0113     phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
0114                     PHM_PlatformCaps_SclkDeepSleep);
0115 
0116     phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
0117                 PHM_PlatformCaps_SclkThrottleLowNotification);
0118 
0119     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0120                 PHM_PlatformCaps_PowerPlaySupport);
0121     return 0;
0122 }
0123 
/* Intentionally empty on SMU10; kept to satisfy the hwmgr interface. */
static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
            struct phm_clock_and_voltage_limits *table)
{
    return 0;
}
0129 
0130 static int smu10_init_dynamic_state_adjustment_rule_settings(
0131                             struct pp_hwmgr *hwmgr)
0132 {
0133     int count = 8;
0134     struct phm_clock_voltage_dependency_table *table_clk_vlt;
0135 
0136     table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, count),
0137                 GFP_KERNEL);
0138 
0139     if (NULL == table_clk_vlt) {
0140         pr_err("Can not allocate memory!\n");
0141         return -ENOMEM;
0142     }
0143 
0144     table_clk_vlt->count = count;
0145     table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
0146     table_clk_vlt->entries[0].v = 0;
0147     table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
0148     table_clk_vlt->entries[1].v = 1;
0149     table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
0150     table_clk_vlt->entries[2].v = 2;
0151     table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
0152     table_clk_vlt->entries[3].v = 3;
0153     table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
0154     table_clk_vlt->entries[4].v = 4;
0155     table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
0156     table_clk_vlt->entries[5].v = 5;
0157     table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
0158     table_clk_vlt->entries[6].v = 6;
0159     table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
0160     table_clk_vlt->entries[7].v = 7;
0161     hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
0162 
0163     return 0;
0164 }
0165 
0166 static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
0167 {
0168     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
0169 
0170     smu10_data->sys_info.htc_hyst_lmt = 5;
0171     smu10_data->sys_info.htc_tmp_lmt = 203;
0172 
0173     if (smu10_data->thermal_auto_throttling_treshold == 0)
0174          smu10_data->thermal_auto_throttling_treshold = 203;
0175 
0176     smu10_construct_max_power_limits_table (hwmgr,
0177                     &hwmgr->dyn_state.max_clock_voltage_on_ac);
0178 
0179     smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
0180 
0181     return 0;
0182 }
0183 
/* No boot-state construction is needed on SMU10; interface stub. */
static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
    return 0;
}
0188 
0189 static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
0190 {
0191     struct PP_Clocks clocks = {0};
0192     struct pp_display_clock_request clock_req;
0193 
0194     clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
0195     clock_req.clock_type = amd_pp_dcf_clock;
0196     clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
0197 
0198     PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
0199                 "Attempt to set DCF Clock Failed!", return -EINVAL);
0200 
0201     return 0;
0202 }
0203 
0204 static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
0205 {
0206     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0207 
0208     if (clock && smu10_data->deep_sleep_dcefclk != clock) {
0209         smu10_data->deep_sleep_dcefclk = clock;
0210         smum_send_msg_to_smc_with_parameter(hwmgr,
0211                     PPSMC_MSG_SetMinDeepSleepDcefclk,
0212                     smu10_data->deep_sleep_dcefclk,
0213                     NULL);
0214     }
0215     return 0;
0216 }
0217 
0218 static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
0219 {
0220     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0221 
0222     if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
0223         smu10_data->dcf_actual_hard_min_freq = clock;
0224         smum_send_msg_to_smc_with_parameter(hwmgr,
0225                     PPSMC_MSG_SetHardMinDcefclkByFreq,
0226                     smu10_data->dcf_actual_hard_min_freq,
0227                     NULL);
0228     }
0229     return 0;
0230 }
0231 
0232 static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
0233 {
0234     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0235 
0236     if (clock && smu10_data->f_actual_hard_min_freq != clock) {
0237         smu10_data->f_actual_hard_min_freq = clock;
0238         smum_send_msg_to_smc_with_parameter(hwmgr,
0239                     PPSMC_MSG_SetHardMinFclkByFreq,
0240                     smu10_data->f_actual_hard_min_freq,
0241                     NULL);
0242     }
0243     return 0;
0244 }
0245 
0246 static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
0247 {
0248     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0249 
0250     if (clock && smu10_data->gfx_actual_soft_min_freq != clock) {
0251         smu10_data->gfx_actual_soft_min_freq = clock;
0252         smum_send_msg_to_smc_with_parameter(hwmgr,
0253                     PPSMC_MSG_SetHardMinGfxClk,
0254                     clock,
0255                     NULL);
0256     }
0257     return 0;
0258 }
0259 
0260 static int smu10_set_soft_max_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
0261 {
0262     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0263 
0264     if (clock && smu10_data->gfx_max_freq_limit != (clock * 100))  {
0265         smu10_data->gfx_max_freq_limit = clock * 100;
0266         smum_send_msg_to_smc_with_parameter(hwmgr,
0267                     PPSMC_MSG_SetSoftMaxGfxClk,
0268                     clock,
0269                     NULL);
0270     }
0271     return 0;
0272 }
0273 
0274 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
0275 {
0276     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0277 
0278     if (smu10_data->num_active_display != count) {
0279         smu10_data->num_active_display = count;
0280         smum_send_msg_to_smc_with_parameter(hwmgr,
0281                 PPSMC_MSG_SetDisplayCount,
0282                 smu10_data->num_active_display,
0283                 NULL);
0284     }
0285 
0286     return 0;
0287 }
0288 
/* Applying a power state only needs the display clock limit refreshed. */
static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
    return smu10_set_clock_limit(hwmgr, input);
}
0293 
0294 static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
0295 {
0296     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0297     struct amdgpu_device *adev = hwmgr->adev;
0298 
0299     smu10_data->vcn_power_gated = true;
0300     smu10_data->isp_tileA_power_gated = true;
0301     smu10_data->isp_tileB_power_gated = true;
0302 
0303     if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
0304         return smum_send_msg_to_smc_with_parameter(hwmgr,
0305                                PPSMC_MSG_SetGfxCGPG,
0306                                true,
0307                                NULL);
0308     else
0309         return 0;
0310 }
0311 
0312 
/* ASIC setup on SMU10 amounts to initializing the power-gate state. */
static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
    return smu10_init_power_gate_state(hwmgr);
}
0317 
0318 static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
0319 {
0320     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0321 
0322     smu10_data->separation_time = 0;
0323     smu10_data->cc6_disable = false;
0324     smu10_data->pstate_disable = false;
0325     smu10_data->cc6_setting_changed = false;
0326 
0327     return 0;
0328 }
0329 
/* Power-off on SMU10 only needs the CC6 settings reset. */
static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
    return smu10_reset_cc6_data(hwmgr);
}
0334 
0335 static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
0336 {
0337     uint32_t reg;
0338     struct amdgpu_device *adev = hwmgr->adev;
0339 
0340     reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
0341     if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
0342         (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
0343         return true;
0344 
0345     return false;
0346 }
0347 
0348 static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
0349 {
0350     struct amdgpu_device *adev = hwmgr->adev;
0351 
0352     if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
0353         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
0354 
0355         /* confirm gfx is back to "on" state */
0356         while (!smu10_is_gfx_on(hwmgr))
0357             msleep(1);
0358     }
0359 
0360     return 0;
0361 }
0362 
/* Nothing to tear down for DPM on SMU10; interface stub. */
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
    return 0;
}
0367 
0368 static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
0369 {
0370     struct amdgpu_device *adev = hwmgr->adev;
0371 
0372     if (adev->pm.pp_feature & PP_GFXOFF_MASK)
0373         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
0374 
0375     return 0;
0376 }
0377 
0378 static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
0379 {
0380     struct amdgpu_device *adev = hwmgr->adev;
0381     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0382     int ret = -EINVAL;
0383 
0384     if (adev->in_suspend) {
0385         pr_info("restore the fine grain parameters\n");
0386 
0387         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
0388                     PPSMC_MSG_SetHardMinGfxClk,
0389                     smu10_data->gfx_actual_soft_min_freq,
0390                     NULL);
0391         if (ret)
0392             return ret;
0393         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
0394                     PPSMC_MSG_SetSoftMaxGfxClk,
0395                     smu10_data->gfx_actual_soft_max_freq,
0396                     NULL);
0397         if (ret)
0398             return ret;
0399     }
0400 
0401     return 0;
0402 }
0403 
0404 static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
0405 {
0406     if (enable)
0407         return smu10_enable_gfx_off(hwmgr);
0408     else
0409         return smu10_disable_gfx_off(hwmgr);
0410 }
0411 
/* No state adjustment rules apply on SMU10; interface stub. */
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                struct pp_power_state  *prequest_ps,
            const struct pp_power_state *pcurrent_ps)
{
    return 0;
}
0418 
/* temporary hardcoded clock voltage breakdown tables */
/*
 * Fallback DPM tables used when the SMC clock table cannot be read or
 * reports an empty DCEFCLK table (see smu10_populate_clock_table).
 * Each entry is { Freq, Vol }; Freq appears to be MHz (it is scaled by
 * 100 to 10 kHz units when consumed) - Vol units per DpmClock_t, TODO
 * confirm against the smu10 SMC interface header.
 */
static const DpmClock_t VddDcfClk[]= {
    { 300, 2600},
    { 600, 3200},
    { 600, 3600},
};

static const DpmClock_t VddSocClk[]= {
    { 478, 2600},
    { 722, 3200},
    { 722, 3600},
};

static const DpmClock_t VddFClk[]= {
    { 400, 2600},
    {1200, 3200},
    {1200, 3600},
};

static const DpmClock_t VddDispClk[]= {
    { 435, 2600},
    { 661, 3200},
    {1086, 3600},
};

static const DpmClock_t VddDppClk[]= {
    { 435, 2600},
    { 661, 3200},
    { 661, 3600},
};

static const DpmClock_t VddPhyClk[]= {
    { 540, 2600},
    { 810, 3200},
    { 810, 3600},
};
0455 
0456 static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
0457             struct smu10_voltage_dependency_table **pptable,
0458             uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
0459 {
0460     uint32_t i;
0461     struct smu10_voltage_dependency_table *ptable;
0462 
0463     ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
0464     if (NULL == ptable)
0465         return -ENOMEM;
0466 
0467     ptable->count = num_entry;
0468 
0469     for (i = 0; i < ptable->count; i++) {
0470         ptable->entries[i].clk         = pclk_dependency_table->Freq * 100;
0471         ptable->entries[i].vol         = pclk_dependency_table->Vol;
0472         pclk_dependency_table++;
0473     }
0474 
0475     *pptable = ptable;
0476 
0477     return 0;
0478 }
0479 
0480 
/*
 * smu10_populate_clock_table - fetch DPM clock tables from the SMC and
 * build per-domain voltage dependency tables.
 *
 * Copies the SMC clock table into the backend; if the copy fails the
 * assert macro returns the failure code.  When the copy succeeded and
 * the DCEF table is non-empty, dependency tables for DCEF/SOC/F/MEM
 * clocks are built from the SMC data; otherwise the hardcoded fallback
 * tables are used for DCEF/SOC/F.  DISP/DPP/PHY always come from the
 * hardcoded tables.  Finally the SMC-reported min/max GFX clock limits
 * are cached (result/10*1000 converts the reported value - presumably
 * MHz - into 10 kHz units; TODO confirm).
 */
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
    uint32_t result;

    struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
    DpmClocks_t  *table = &(smu10_data->clock_table);
    struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

    /* pull the whole clock table out of SMC RAM */
    result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);

    PP_ASSERT_WITH_CODE((0 == result),
            "Attempt to copy clock table from smc failed",
            return result);

    /* a zero first DCEF frequency marks the SMC table as unusable */
    if (0 == result && table->DcefClocks[0].Freq != 0) {
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
                        NUM_DCEFCLK_DPM_LEVELS,
                        &smu10_data->clock_table.DcefClocks[0]);
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
                        NUM_SOCCLK_DPM_LEVELS,
                        &smu10_data->clock_table.SocClocks[0]);
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
                        NUM_FCLK_DPM_LEVELS,
                        &smu10_data->clock_table.FClocks[0]);
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
                        NUM_MEMCLK_DPM_LEVELS,
                        &smu10_data->clock_table.MemClocks[0]);
    } else {
        /* fall back to the temporary hardcoded tables above */
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
                        ARRAY_SIZE(VddDcfClk),
                        &VddDcfClk[0]);
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
                        ARRAY_SIZE(VddSocClk),
                        &VddSocClk[0]);
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
                        ARRAY_SIZE(VddFClk),
                        &VddFClk[0]);
    }
    smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
                    ARRAY_SIZE(VddDispClk),
                    &VddDispClk[0]);
    smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
                    ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
    smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
                    ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

    /* cache the GFX clock envelope reported by the SMC */
    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
    smu10_data->gfx_min_freq_limit = result / 10 * 1000;

    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
    smu10_data->gfx_max_freq_limit = result / 10 * 1000;

    return 0;
}
0535 
0536 static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
0537 {
0538     int result = 0;
0539     struct smu10_hwmgr *data;
0540 
0541     data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
0542     if (data == NULL)
0543         return -ENOMEM;
0544 
0545     hwmgr->backend = data;
0546 
0547     result = smu10_initialize_dpm_defaults(hwmgr);
0548     if (result != 0) {
0549         pr_err("smu10_initialize_dpm_defaults failed\n");
0550         return result;
0551     }
0552 
0553     smu10_populate_clock_table(hwmgr);
0554 
0555     result = smu10_get_system_info_data(hwmgr);
0556     if (result != 0) {
0557         pr_err("smu10_get_system_info_data failed\n");
0558         return result;
0559     }
0560 
0561     smu10_construct_boot_state(hwmgr);
0562 
0563     hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
0564                         SMU10_MAX_HARDWARE_POWERLEVELS;
0565 
0566     hwmgr->platform_descriptor.hardwarePerformanceLevels =
0567                         SMU10_MAX_HARDWARE_POWERLEVELS;
0568 
0569     hwmgr->platform_descriptor.vbiosInterruptId = 0;
0570 
0571     hwmgr->platform_descriptor.clockStep.engineClock = 500;
0572 
0573     hwmgr->platform_descriptor.clockStep.memoryClock = 500;
0574 
0575     hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
0576 
0577     hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
0578     hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
0579 
0580     /* enable the pp_od_clk_voltage sysfs file */
0581     hwmgr->od_enabled = 1;
0582     /* disabled fine grain tuning function by default */
0583     data->fine_grain_enabled = 0;
0584     return result;
0585 }
0586 
0587 static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
0588 {
0589     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
0590     struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
0591 
0592     kfree(pinfo->vdd_dep_on_dcefclk);
0593     pinfo->vdd_dep_on_dcefclk = NULL;
0594     kfree(pinfo->vdd_dep_on_socclk);
0595     pinfo->vdd_dep_on_socclk = NULL;
0596     kfree(pinfo->vdd_dep_on_fclk);
0597     pinfo->vdd_dep_on_fclk = NULL;
0598     kfree(pinfo->vdd_dep_on_dispclk);
0599     pinfo->vdd_dep_on_dispclk = NULL;
0600     kfree(pinfo->vdd_dep_on_dppclk);
0601     pinfo->vdd_dep_on_dppclk = NULL;
0602     kfree(pinfo->vdd_dep_on_phyclk);
0603     pinfo->vdd_dep_on_phyclk = NULL;
0604 
0605     kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
0606     hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
0607 
0608     kfree(hwmgr->backend);
0609     hwmgr->backend = NULL;
0610 
0611     return 0;
0612 }
0613 
/*
 * smu10_dpm_force_dpm_level - pin the DPM clocks for a forced level.
 *
 * For every non-manual level, fine-grain tuning is disabled and the
 * cached soft min/max GFX clocks are re-read from the SMC before the
 * level-specific hard-min and soft-max limits are sent.  The order of
 * the SMC messages within each case is preserved as-is; it may be
 * hardware-significant, so do not reorder.
 *
 * Clock unit notes (assumed, TODO confirm): min_core_set_clock and
 * gfx_*_freq_limit appear to be in 10 kHz (hence the /100 to MHz);
 * min_mem_set_clock/100 and table .clk/100 likewise convert to MHz.
 *
 * Always returns 0, including when the SMU firmware is too old to
 * honor the request.
 */
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                enum amd_dpm_forced_level level)
{
    struct smu10_hwmgr *data = hwmgr->backend;
    uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
    uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
    uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
    uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
    uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;

    /* firmware older than 0x1E3700 cannot take these messages */
    if (hwmgr->smu_version < 0x1E3700) {
        pr_info("smu firmware version too old, can not set dpm level\n");
        return 0;
    }

    /* clamp the display-requested sclk to the reported hardware minimum */
    if (min_sclk < data->gfx_min_freq_limit)
        min_sclk = data->gfx_min_freq_limit;

    min_sclk /= 100; /* transfer 10KHz to MHz */
    if (min_mclk < data->clock_table.FClocks[0].Freq)
        min_mclk = data->clock_table.FClocks[0].Freq;

    switch (level) {
    /* peak: pin every domain to its maximum */
    case AMD_DPM_FORCED_LEVEL_HIGH:
    case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
        data->fine_grain_enabled = 0;

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);

        data->gfx_actual_soft_min_freq = fine_grain_min_freq;
        data->gfx_actual_soft_max_freq = fine_grain_max_freq;

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinGfxClk,
                        data->gfx_max_freq_limit/100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinFclkByFreq,
                        SMU10_UMD_PSTATE_PEAK_FCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinSocclkByFreq,
                        SMU10_UMD_PSTATE_PEAK_SOCCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinVcn,
                        SMU10_UMD_PSTATE_VCE,
                        NULL);

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxGfxClk,
                        data->gfx_max_freq_limit/100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxFclkByFreq,
                        SMU10_UMD_PSTATE_PEAK_FCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxSocclkByFreq,
                        SMU10_UMD_PSTATE_PEAK_SOCCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxVcn,
                        SMU10_UMD_PSTATE_VCE,
                        NULL);
        break;
    /* pin GFX clock to the (clamped) display minimum */
    case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
        data->fine_grain_enabled = 0;

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);

        data->gfx_actual_soft_min_freq = fine_grain_min_freq;
        data->gfx_actual_soft_max_freq = fine_grain_max_freq;

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinGfxClk,
                        min_sclk,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxGfxClk,
                        min_sclk,
                        NULL);
        break;
    /* pin fabric/memory clock to the (clamped) display minimum */
    case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
        data->fine_grain_enabled = 0;

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);

        data->gfx_actual_soft_min_freq = fine_grain_min_freq;
        data->gfx_actual_soft_max_freq = fine_grain_max_freq;

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinFclkByFreq,
                        min_mclk,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxFclkByFreq,
                        min_mclk,
                        NULL);
        break;
    /* standard profile: pin every domain to its UMD pstate value */
    case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
        data->fine_grain_enabled = 0;

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);

        data->gfx_actual_soft_min_freq = fine_grain_min_freq;
        data->gfx_actual_soft_max_freq = fine_grain_max_freq;

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinGfxClk,
                        SMU10_UMD_PSTATE_GFXCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinFclkByFreq,
                        SMU10_UMD_PSTATE_FCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinSocclkByFreq,
                        SMU10_UMD_PSTATE_SOCCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinVcn,
                        SMU10_UMD_PSTATE_PROFILE_VCE,
                        NULL);

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxGfxClk,
                        SMU10_UMD_PSTATE_GFXCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxFclkByFreq,
                        SMU10_UMD_PSTATE_FCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxSocclkByFreq,
                        SMU10_UMD_PSTATE_SOCCLK,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxVcn,
                        SMU10_UMD_PSTATE_PROFILE_VCE,
                        NULL);
        break;
    /* auto: mins from display config, maxes from the dependency tables */
    case AMD_DPM_FORCED_LEVEL_AUTO:
        data->fine_grain_enabled = 0;

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);

        data->gfx_actual_soft_min_freq = fine_grain_min_freq;
        data->gfx_actual_soft_max_freq = fine_grain_max_freq;

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinGfxClk,
                        min_sclk,
                        NULL);
        /* with more than 3 displays, keep fclk at its table minimum */
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinFclkByFreq,
                        hwmgr->display_config->num_display > 3 ?
                        (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
                        min_mclk,
                        NULL);

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinSocclkByFreq,
                        data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinVcn,
                        SMU10_UMD_PSTATE_MIN_VCE,
                        NULL);

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxGfxClk,
                        data->gfx_max_freq_limit/100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxFclkByFreq,
                        data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxSocclkByFreq,
                        data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxVcn,
                        SMU10_UMD_PSTATE_VCE,
                        NULL);
        break;
    /* low: pin GFX to its minimum and fclk to the display minimum */
    case AMD_DPM_FORCED_LEVEL_LOW:
        data->fine_grain_enabled = 0;

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);

        data->gfx_actual_soft_min_freq = fine_grain_min_freq;
        data->gfx_actual_soft_max_freq = fine_grain_max_freq;

        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinGfxClk,
                        data->gfx_min_freq_limit/100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxGfxClk,
                        data->gfx_min_freq_limit/100,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetHardMinFclkByFreq,
                        min_mclk,
                        NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetSoftMaxFclkByFreq,
                        min_mclk,
                        NULL);
        break;
    /* manual: just unlock fine-grain tuning; no clocks are touched */
    case AMD_DPM_FORCED_LEVEL_MANUAL:
        data->fine_grain_enabled = 1;
        break;
    case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
    default:
        break;
    }
    return 0;
}
0841 
0842 static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
0843 {
0844     struct smu10_hwmgr *data;
0845 
0846     if (hwmgr == NULL)
0847         return -EINVAL;
0848 
0849     data = (struct smu10_hwmgr *)(hwmgr->backend);
0850 
0851     if (low)
0852         return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
0853     else
0854         return data->clock_vol_info.vdd_dep_on_fclk->entries[
0855             data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
0856 }
0857 
0858 static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
0859 {
0860     struct smu10_hwmgr *data;
0861 
0862     if (hwmgr == NULL)
0863         return -EINVAL;
0864 
0865     data = (struct smu10_hwmgr *)(hwmgr->backend);
0866 
0867     if (low)
0868         return data->gfx_min_freq_limit;
0869     else
0870         return data->gfx_max_freq_limit;
0871 }
0872 
/* No boot-state patching is required on SMU10; provided to satisfy the
 * hwmgr callback interface. */
static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
                    struct pp_hw_power_state *hw_ps)
{
    return 0;
}
0878 
0879 static int smu10_dpm_get_pp_table_entry_callback(
0880                              struct pp_hwmgr *hwmgr,
0881                        struct pp_hw_power_state *hw_ps,
0882                               unsigned int index,
0883                              const void *clock_info)
0884 {
0885     struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
0886 
0887     smu10_ps->levels[index].engine_clock = 0;
0888 
0889     smu10_ps->levels[index].vddc_index = 0;
0890     smu10_ps->level = index + 1;
0891 
0892     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
0893         smu10_ps->levels[index].ds_divider_index = 5;
0894         smu10_ps->levels[index].ss_divider_index = 5;
0895     }
0896 
0897     return 0;
0898 }
0899 
/*
 * Number of power-state entries in the pptable; reports zero (rather
 * than an error code) when the table query fails.
 */
static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
    unsigned long count = 0;

    if (pp_tables_get_num_of_entries(hwmgr, &count))
        return 0;

    return count;
}
0909 
0910 static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
0911             unsigned long entry, struct pp_power_state *ps)
0912 {
0913     int result;
0914     struct smu10_power_state *smu10_ps;
0915 
0916     ps->hardware.magic = SMU10_Magic;
0917 
0918     smu10_ps = cast_smu10_ps(&(ps->hardware));
0919 
0920     result = pp_tables_get_entry(hwmgr, entry, ps,
0921             smu10_dpm_get_pp_table_entry_callback);
0922 
0923     smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
0924     smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
0925 
0926     return result;
0927 }
0928 
/* Size of one SMU10 hardware power state, used by the core allocator. */
static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
    return sizeof(struct smu10_power_state);
}
0933 
/* No CPU power-state programming is needed on SMU10; interface stub. */
static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
    return 0;
}
0938 
0939 
0940 static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
0941             bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
0942 {
0943     struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
0944 
0945     if (separation_time != data->separation_time ||
0946             cc6_disable != data->cc6_disable ||
0947             pstate_disable != data->pstate_disable) {
0948         data->separation_time = separation_time;
0949         data->cc6_disable = cc6_disable;
0950         data->pstate_disable = pstate_disable;
0951         data->cc6_setting_changed = true;
0952     }
0953     return 0;
0954 }
0955 
/* DAL simple clock info is not provided by this driver; always fails. */
static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
        struct amd_pp_simple_clock_info *info)
{
    return -EINVAL;
}
0961 
0962 static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
0963         enum pp_clock_type type, uint32_t mask)
0964 {
0965     struct smu10_hwmgr *data = hwmgr->backend;
0966     struct smu10_voltage_dependency_table *mclk_table =
0967                     data->clock_vol_info.vdd_dep_on_fclk;
0968     uint32_t low, high;
0969 
0970     low = mask ? (ffs(mask) - 1) : 0;
0971     high = mask ? (fls(mask) - 1) : 0;
0972 
0973     switch (type) {
0974     case PP_SCLK:
0975         if (low > 2 || high > 2) {
0976             pr_info("Currently sclk only support 3 levels on RV\n");
0977             return -EINVAL;
0978         }
0979 
0980         smum_send_msg_to_smc_with_parameter(hwmgr,
0981                         PPSMC_MSG_SetHardMinGfxClk,
0982                         low == 2 ? data->gfx_max_freq_limit/100 :
0983                         low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
0984                         data->gfx_min_freq_limit/100,
0985                         NULL);
0986 
0987         smum_send_msg_to_smc_with_parameter(hwmgr,
0988                         PPSMC_MSG_SetSoftMaxGfxClk,
0989                         high == 0 ? data->gfx_min_freq_limit/100 :
0990                         high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
0991                         data->gfx_max_freq_limit/100,
0992                         NULL);
0993         break;
0994 
0995     case PP_MCLK:
0996         if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
0997             return -EINVAL;
0998 
0999         smum_send_msg_to_smc_with_parameter(hwmgr,
1000                         PPSMC_MSG_SetHardMinFclkByFreq,
1001                         mclk_table->entries[low].clk/100,
1002                         NULL);
1003 
1004         smum_send_msg_to_smc_with_parameter(hwmgr,
1005                         PPSMC_MSG_SetSoftMaxFclkByFreq,
1006                         mclk_table->entries[high].clk/100,
1007                         NULL);
1008         break;
1009 
1010     case PP_PCIE:
1011     default:
1012         break;
1013     }
1014     return 0;
1015 }
1016 
/*
 * Emit the supported levels for clock @type into sysfs buffer @buf.
 * Returns the number of bytes written (or a negative errno from the
 * OD queries). A trailing "*" marks the currently active level.
 */
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
        enum pp_clock_type type, char *buf)
{
    struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
    struct smu10_voltage_dependency_table *mclk_table =
            data->clock_vol_info.vdd_dep_on_fclk;
    uint32_t i, now, size = 0;
    uint32_t min_freq, max_freq = 0;
    uint32_t ret = 0;

    switch (type) {
    case PP_SCLK:
        /* current gfx clock in MHz, as reported by the SMU */
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);

    /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
        if (now == data->gfx_max_freq_limit/100)
            i = 2;
        else if (now == data->gfx_min_freq_limit/100)
            i = 0;
        else
            i = 1;

        size += sprintf(buf + size, "0: %uMhz %s\n",
                    data->gfx_min_freq_limit/100,
                    i == 0 ? "*" : "");
        size += sprintf(buf + size, "1: %uMhz %s\n",
                    i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
                    i == 1 ? "*" : "");
        size += sprintf(buf + size, "2: %uMhz %s\n",
                    data->gfx_max_freq_limit/100,
                    i == 2 ? "*" : "");
        break;
    case PP_MCLK:
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);

        for (i = 0; i < mclk_table->count; i++)
            size += sprintf(buf + size, "%d: %uMhz %s\n",
                    i,
                    mclk_table->entries[i].clk / 100,
                    ((mclk_table->entries[i].clk / 100)
                     == now) ? "*" : "");
        break;
    case OD_SCLK:
        /* overdrive soft min/max sclk; only shown when OD is enabled */
        if (hwmgr->od_enabled) {
            ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
            if (ret)
                return ret;
            ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
            if (ret)
                return ret;

            /* fall back to the SMU-reported limits when no user value is set */
            size += sprintf(buf + size, "%s:\n", "OD_SCLK");
            size += sprintf(buf + size, "0: %10uMhz\n",
            (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
            size += sprintf(buf + size, "1: %10uMhz\n",
            (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
        }
        break;
    case OD_RANGE:
        /* legal range for OD_SCLK edits, queried from the SMU */
        if (hwmgr->od_enabled) {
            ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
            if (ret)
                return ret;
            ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
            if (ret)
                return ret;

            size += sprintf(buf + size, "%s:\n", "OD_RANGE");
            size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
                min_freq, max_freq);
        }
        break;
    default:
        break;
    }

    return size;
}
1095 
1096 static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1097                 PHM_PerformanceLevelDesignation designation, uint32_t index,
1098                 PHM_PerformanceLevel *level)
1099 {
1100     struct smu10_hwmgr *data;
1101 
1102     if (level == NULL || hwmgr == NULL || state == NULL)
1103         return -EINVAL;
1104 
1105     data = (struct smu10_hwmgr *)(hwmgr->backend);
1106 
1107     if (index == 0) {
1108         level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
1109         level->coreClock = data->gfx_min_freq_limit;
1110     } else {
1111         level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
1112             data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
1113         level->coreClock = data->gfx_max_freq_limit;
1114     }
1115 
1116     level->nonLocalMemoryFreq = 0;
1117     level->nonLocalMemoryWidth = 0;
1118 
1119     return 0;
1120 }
1121 
1122 static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1123     const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1124 {
1125     const struct smu10_power_state *ps = cast_const_smu10_ps(state);
1126 
1127     clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
1128     clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
1129 
1130     return 0;
1131 }
1132 
#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000
#define MEM_LATENCY_HIGH            245
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF


/*
 * Map a memory clock onto a latency figure: clocks below the
 * low-latency threshold are out of range (error), mid-range clocks
 * pay the high latency, and fast clocks the low one.
 */
static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
        uint32_t clock)
{
    if (clock < MEM_FREQ_LOW_LATENCY)
        return MEM_LATENCY_ERR;

    return (clock >= MEM_FREQ_HIGH_LATENCY) ?
        MEM_LATENCY_LOW : MEM_LATENCY_HIGH;
}
1151 
1152 static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1153         enum amd_pp_clock_type type,
1154         struct pp_clock_levels_with_latency *clocks)
1155 {
1156     uint32_t i;
1157     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1158     struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
1159     struct smu10_voltage_dependency_table *pclk_vol_table;
1160     bool latency_required = false;
1161 
1162     if (pinfo == NULL)
1163         return -EINVAL;
1164 
1165     switch (type) {
1166     case amd_pp_mem_clock:
1167         pclk_vol_table = pinfo->vdd_dep_on_mclk;
1168         latency_required = true;
1169         break;
1170     case amd_pp_f_clock:
1171         pclk_vol_table = pinfo->vdd_dep_on_fclk;
1172         latency_required = true;
1173         break;
1174     case amd_pp_dcf_clock:
1175         pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
1176         break;
1177     case amd_pp_disp_clock:
1178         pclk_vol_table = pinfo->vdd_dep_on_dispclk;
1179         break;
1180     case amd_pp_phy_clock:
1181         pclk_vol_table = pinfo->vdd_dep_on_phyclk;
1182         break;
1183     case amd_pp_dpp_clock:
1184         pclk_vol_table = pinfo->vdd_dep_on_dppclk;
1185         break;
1186     default:
1187         return -EINVAL;
1188     }
1189 
1190     if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
1191         return -EINVAL;
1192 
1193     clocks->num_levels = 0;
1194     for (i = 0; i < pclk_vol_table->count; i++) {
1195         if (pclk_vol_table->entries[i].clk) {
1196             clocks->data[clocks->num_levels].clocks_in_khz =
1197                 pclk_vol_table->entries[i].clk * 10;
1198             clocks->data[clocks->num_levels].latency_in_us = latency_required ?
1199                 smu10_get_mem_latency(hwmgr,
1200                               pclk_vol_table->entries[i].clk) :
1201                 0;
1202             clocks->num_levels++;
1203         }
1204     }
1205 
1206     return 0;
1207 }
1208 
1209 static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1210         enum amd_pp_clock_type type,
1211         struct pp_clock_levels_with_voltage *clocks)
1212 {
1213     uint32_t i;
1214     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1215     struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
1216     struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
1217 
1218     if (pinfo == NULL)
1219         return -EINVAL;
1220 
1221     switch (type) {
1222     case amd_pp_mem_clock:
1223         pclk_vol_table = pinfo->vdd_dep_on_mclk;
1224         break;
1225     case amd_pp_f_clock:
1226         pclk_vol_table = pinfo->vdd_dep_on_fclk;
1227         break;
1228     case amd_pp_dcf_clock:
1229         pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
1230         break;
1231     case amd_pp_soc_clock:
1232         pclk_vol_table = pinfo->vdd_dep_on_socclk;
1233         break;
1234     case amd_pp_disp_clock:
1235         pclk_vol_table = pinfo->vdd_dep_on_dispclk;
1236         break;
1237     case amd_pp_phy_clock:
1238         pclk_vol_table = pinfo->vdd_dep_on_phyclk;
1239         break;
1240     default:
1241         return -EINVAL;
1242     }
1243 
1244     if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
1245         return -EINVAL;
1246 
1247     clocks->num_levels = 0;
1248     for (i = 0; i < pclk_vol_table->count; i++) {
1249         if (pclk_vol_table->entries[i].clk) {
1250             clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
1251             clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
1252             clocks->num_levels++;
1253         }
1254     }
1255 
1256     return 0;
1257 }
1258 
1259 
1260 
/* Report the maximum engine clock for display code. */
static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
    clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
    return 0;
}
1266 
/*
 * Read the current on-die temperature from the THM block and scale it
 * into PP_TEMPERATURE_UNITS_PER_CENTIGRADES units.
 */
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
    struct amdgpu_device *adev = hwmgr->adev;
    uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
    int cur_temp =
        (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;

    /*
     * NOTE(review): the range-select mask is tested against the already
     * shifted CUR_TEMP field rather than reg_value — confirm against the
     * pwr_10_0 register layout that the bit lands inside the shifted field.
     * When the range-select bit is set, a -49 offset applies before scaling.
     */
    if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
        cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
    else
        cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

    return cur_temp;
}
1281 
/*
 * Read one sensor for AMDGPU_PP_SENSOR_* index @idx into @value,
 * storing the number of bytes written through @size.
 * Returns 0 on success or a negative errno.
 */
static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
              void *value, int *size)
{
    struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
    struct amdgpu_device *adev = hwmgr->adev;
    uint32_t sclk, mclk, activity_percent;
    bool has_gfx_busy;
    int ret = 0;

    /* GetGfxBusy support was added on RV SMU FW 30.85.00 and PCO 4.30.59 */
    if ((adev->apu_flags & AMD_APU_IS_PICASSO) &&
        (hwmgr->smu_version >= 0x41e3b))
        has_gfx_busy = true;
    else if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
         (hwmgr->smu_version >= 0x1e5500))
        has_gfx_busy = true;
    else
        has_gfx_busy = false;

    switch (idx) {
    case AMDGPU_PP_SENSOR_GFX_SCLK:
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
            /* in units of 10KHZ */
        *((uint32_t *)value) = sclk * 100;
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_GFX_MCLK:
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
            /* in units of 10KHZ */
        *((uint32_t *)value) = mclk * 100;
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_GPU_TEMP:
        *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
        break;
    case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
        /* 1 = VCN powered up, 0 = VCN power-gated */
        *(uint32_t *)value =  smu10_data->vcn_power_gated ? 0 : 1;
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_GPU_LOAD:
        if (!has_gfx_busy)
            ret = -EOPNOTSUPP;
        else {
            ret = smum_send_msg_to_smc(hwmgr,
                           PPSMC_MSG_GetGfxBusy,
                           &activity_percent);
            /* clamp the firmware-reported busy percentage to 100 */
            if (!ret)
                *((uint32_t *)value) = min(activity_percent, (u32)100);
            else
                ret = -EIO;
        }
        break;
    default:
        ret = -EOPNOTSUPP;
        break;
    }

    return ret;
}
1341 
1342 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1343         void *clock_ranges)
1344 {
1345     struct smu10_hwmgr *data = hwmgr->backend;
1346     struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
1347     Watermarks_t *table = &(data->water_marks_table);
1348     struct amdgpu_device *adev = hwmgr->adev;
1349     int i;
1350 
1351     smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
1352 
1353     if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1354         for (i = 0; i < NUM_WM_RANGES; i++)
1355             table->WatermarkRow[WM_DCFCLK][i].WmType = (uint8_t)0;
1356 
1357         for (i = 0; i < NUM_WM_RANGES; i++)
1358             table->WatermarkRow[WM_SOCCLK][i].WmType = (uint8_t)0;
1359     }
1360 
1361     smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
1362     data->water_marks_exist = true;
1363     return 0;
1364 }
1365 
/* Notify the SMU to restore the RCC PFC PME register. */
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{

    return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}
1371 
/* Ask the SMU to power-gate the MMHUB block. */
static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
    return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}
1376 
1377 static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
1378 {
1379     if (gate)
1380         return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
1381     else
1382         return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
1383 }
1384 
1385 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
1386 {
1387     struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1388 
1389     if (bgate) {
1390         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1391                         AMD_IP_BLOCK_TYPE_VCN,
1392                         AMD_PG_STATE_GATE);
1393         smum_send_msg_to_smc_with_parameter(hwmgr,
1394                     PPSMC_MSG_PowerDownVcn, 0, NULL);
1395         smu10_data->vcn_power_gated = true;
1396     } else {
1397         smum_send_msg_to_smc_with_parameter(hwmgr,
1398                         PPSMC_MSG_PowerUpVcn, 0, NULL);
1399         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1400                         AMD_IP_BLOCK_TYPE_VCN,
1401                         AMD_PG_STATE_UNGATE);
1402         smu10_data->vcn_power_gated = false;
1403     }
1404 }
1405 
1406 static int conv_power_profile_to_pplib_workload(int power_profile)
1407 {
1408     int pplib_workload = 0;
1409 
1410     switch (power_profile) {
1411     case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
1412         pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
1413         break;
1414     case PP_SMC_POWER_PROFILE_VIDEO:
1415         pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
1416         break;
1417     case PP_SMC_POWER_PROFILE_VR:
1418         pplib_workload = WORKLOAD_PPLIB_VR_BIT;
1419         break;
1420     case PP_SMC_POWER_PROFILE_COMPUTE:
1421         pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
1422         break;
1423     case PP_SMC_POWER_PROFILE_CUSTOM:
1424         pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
1425         break;
1426     }
1427 
1428     return pplib_workload;
1429 }
1430 
/*
 * Print the power-profile table to sysfs @buf; returns bytes written.
 * Each row of profile_mode_setting holds, per the column titles:
 * {BUSY_SET_POINT, FPS, USE_RLC_BUSY, MIN_ACTIVE_LEVEL}, indexed by
 * PP_SMC_POWER_PROFILE_*.
 */
static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
    uint32_t i, size = 0;
    static const uint8_t
        profile_mode_setting[6][4] = {{70, 60, 0, 0,},
                        {70, 60, 1, 3,},
                        {90, 60, 0, 0,},
                        {70, 60, 0, 0,},
                        {70, 90, 0, 0,},
                        {30, 60, 0, 6,},
                        };
    static const char *title[6] = {"NUM",
            "MODE_NAME",
            "BUSY_SET_POINT",
            "FPS",
            "USE_RLC_BUSY",
            "MIN_ACTIVE_LEVEL"};

    if (!buf)
        return -EINVAL;

    phm_get_sysfs_buf(&buf, &size);

    size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
            title[1], title[2], title[3], title[4], title[5]);

    /* "*" marks the currently active profile */
    for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
        size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
            i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
            profile_mode_setting[i][0], profile_mode_setting[i][1],
            profile_mode_setting[i][2], profile_mode_setting[i][3]);

    return size;
}
1465 
1466 static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
1467 {
1468     struct amdgpu_device *adev = hwmgr->adev;
1469     if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
1470         (hwmgr->smu_version >= 0x41e2b))
1471         return true;
1472     else
1473         return false;
1474 }
1475 
/*
 * Switch the active power profile. On Raven1-refresh parts GFXOFF is
 * temporarily disabled around the SMU notification for non-default
 * workloads; the disable/enable ordering here is deliberate.
 */
static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
    int workload_type = 0;
    int result = 0;

    /* the requested mode is the last element of @input */
    if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
        pr_err("Invalid power profile mode %ld\n", input[size]);
        return -EINVAL;
    }
    if (hwmgr->power_profile_mode == input[size])
        return 0;

    /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
    workload_type =
        conv_power_profile_to_pplib_workload(input[size]);
    if (workload_type &&
        smu10_is_raven1_refresh(hwmgr) &&
        !hwmgr->gfxoff_state_changed_by_workload) {
        smu10_gfx_off_control(hwmgr, false);
        hwmgr->gfxoff_state_changed_by_workload = true;
    }
    result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
                        1 << workload_type,
                        NULL);
    if (!result)
        hwmgr->power_profile_mode = input[size];
    /* re-enable GFXOFF if it was disabled above */
    if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
        smu10_gfx_off_control(hwmgr, true);
        hwmgr->gfxoff_state_changed_by_workload = false;
    }

    return 0;
}
1509 
/* Request a device/driver reset from the SMU with the given reset @mode. */
static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
    return smum_send_msg_to_smc_with_parameter(hwmgr,
                           PPSMC_MSG_DeviceDriverReset,
                           mode,
                           NULL);
}
1517 
/*
 * Handle pp_od_clk_voltage writes: edit, reset, or commit the
 * fine-grain soft min/max gfx clock. Only legal when OD is enabled
 * and the DPM level is manual (fine_grain_enabled).
 */
static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
                    enum PP_OD_DPM_TABLE_COMMAND type,
                    long *input, uint32_t size)
{
    uint32_t min_freq, max_freq = 0;
    struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
    int ret = 0;

    if (!hwmgr->od_enabled) {
        pr_err("Fine grain not support\n");
        return -EINVAL;
    }

    if (!smu10_data->fine_grain_enabled) {
        pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
        return -EINVAL;
    }

    if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
        /* edit: input[0] selects min (0) or max (1), input[1] is MHz */
        if (size != 2) {
            pr_err("Input parameter number not correct\n");
            return -EINVAL;
        }

        if (input[0] == 0) {
            /* requested minimum must not go below the SMU-reported floor */
            smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
            if (input[1] < min_freq) {
                pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
                    input[1], min_freq);
                return -EINVAL;
            }
            smu10_data->gfx_actual_soft_min_freq = input[1];
        } else if (input[0] == 1) {
            /* requested maximum must not exceed the SMU-reported ceiling */
            smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
            if (input[1] > max_freq) {
                pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
                    input[1], max_freq);
                return -EINVAL;
            }
            smu10_data->gfx_actual_soft_max_freq = input[1];
        } else {
            return -EINVAL;
        }
    } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
        /* reset: revert both cached values to the SMU defaults */
        if (size != 0) {
            pr_err("Input parameter number not correct\n");
            return -EINVAL;
        }
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);

        smu10_data->gfx_actual_soft_min_freq = min_freq;
        smu10_data->gfx_actual_soft_max_freq = max_freq;
    } else if (type == PP_OD_COMMIT_DPM_TABLE) {
        /* commit: validate the cached range and push it to the SMU */
        if (size != 0) {
            pr_err("Input parameter number not correct\n");
            return -EINVAL;
        }

        if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
            pr_err("The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
                    smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
            return -EINVAL;
        }

        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                    PPSMC_MSG_SetHardMinGfxClk,
                    smu10_data->gfx_actual_soft_min_freq,
                    NULL);
        if (ret)
            return ret;

        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                    PPSMC_MSG_SetSoftMaxGfxClk,
                    smu10_data->gfx_actual_soft_max_freq,
                    NULL);
        if (ret)
            return ret;
    } else {
        return -EINVAL;
    }

    return 0;
}
1602 
/* Forward a GPU state-change notification (@state) to the SMU; the SMC
 * return status is not checked. */
static int smu10_gfx_state_change(struct pp_hwmgr *hwmgr, uint32_t state)
{
    smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GpuChangeState, state, NULL);

    return 0;
}
1609 
/* Callback table wiring the SMU10 implementations into the powerplay
 * core; unimplemented hooks are left NULL. */
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
    .backend_init = smu10_hwmgr_backend_init,
    .backend_fini = smu10_hwmgr_backend_fini,
    .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
    .force_dpm_level = smu10_dpm_force_dpm_level,
    .get_power_state_size = smu10_get_power_state_size,
    .powerdown_uvd = NULL,
    .powergate_uvd = smu10_powergate_vcn,
    .powergate_vce = NULL,
    .get_mclk = smu10_dpm_get_mclk,
    .get_sclk = smu10_dpm_get_sclk,
    .patch_boot_state = smu10_dpm_patch_boot_state,
    .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
    .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
    .set_cpu_power_state = smu10_set_cpu_power_state,
    .store_cc6_data = smu10_store_cc6_data,
    .force_clock_level = smu10_force_clock_level,
    .print_clock_levels = smu10_print_clock_levels,
    .get_dal_power_level = smu10_get_dal_power_level,
    .get_performance_level = smu10_get_performance_level,
    .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
    .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
    .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
    .set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
    .get_max_high_clocks = smu10_get_max_high_clocks,
    .read_sensor = smu10_read_sensor,
    .set_active_display_count = smu10_set_active_display_count,
    .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
    .dynamic_state_management_enable = smu10_enable_dpm_tasks,
    .power_off_asic = smu10_power_off_asic,
    .asic_setup = smu10_setup_asic_task,
    .power_state_set = smu10_set_power_state_tasks,
    .dynamic_state_management_disable = smu10_disable_dpm_tasks,
    .powergate_mmhub = smu10_powergate_mmhub,
    .smus_notify_pwe = smu10_smus_notify_pwe,
    .display_clock_voltage_request = smu10_display_clock_voltage_request,
    .powergate_gfx = smu10_gfx_off_control,
    .powergate_sdma = smu10_powergate_sdma,
    .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
    .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
    .set_hard_min_gfxclk_by_freq = smu10_set_hard_min_gfxclk_by_freq,
    .set_soft_max_gfxclk_by_freq = smu10_set_soft_max_gfxclk_by_freq,
    .get_power_profile_mode = smu10_get_power_profile_mode,
    .set_power_profile_mode = smu10_set_power_profile_mode,
    .asic_reset = smu10_asic_reset,
    .set_fine_grain_clk_vol = smu10_set_fine_grain_clk_vol,
    .gfx_state_change = smu10_gfx_state_change,
};
1658 
/* Entry point: attach the SMU10 hwmgr callback table and the common
 * pptable handlers to @hwmgr. Always succeeds. */
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
    hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
    hwmgr->pptable_func = &pptable_funcs;
    return 0;
}