0001 /*
0002  * Copyright 2015 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 #include "pp_debug.h"
0024 #include <linux/types.h>
0025 #include <linux/kernel.h>
0026 #include <linux/slab.h>
0027 #include "atom-types.h"
0028 #include "atombios.h"
0029 #include "processpptables.h"
0030 #include "cgs_common.h"
0031 #include "smu/smu_8_0_d.h"
0032 #include "smu8_fusion.h"
0033 #include "smu/smu_8_0_sh_mask.h"
0034 #include "smumgr.h"
0035 #include "hwmgr.h"
0036 #include "hardwaremanager.h"
0037 #include "cz_ppsmc.h"
0038 #include "smu8_hwmgr.h"
0039 #include "power_state.h"
0040 #include "pp_thermal.h"
0041 
0042 #define ixSMUSVI_NB_CURRENTVID 0xD8230044
0043 #define CURRENT_NB_VID_MASK 0xff000000
0044 #define CURRENT_NB_VID__SHIFT 24
0045 #define ixSMUSVI_GFX_CURRENTVID  0xD8230048
0046 #define CURRENT_GFX_VID_MASK 0xff000000
0047 #define CURRENT_GFX_VID__SHIFT 24
0048 
0049 static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
0050 
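/*
 * The pp_hw_power_state embedded in a pp_power_state is tagged with a magic
 * value; these helpers check that tag before downcasting to the SMU8-specific
 * power state and return NULL on a mismatch.
 */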
0051 static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
0052 {
0053     if (smu8_magic != hw_ps->magic)
0054         return NULL;
0055 
0056     return (struct smu8_power_state *)hw_ps;
0057 }
0058 
0059 static const struct smu8_power_state *cast_const_smu8_power_state(
0060                 const struct pp_hw_power_state *hw_ps)
0061 {
0062     if (smu8_magic != hw_ps->magic)
0063         return NULL;
0064 
0065     return (struct smu8_power_state *)hw_ps;
0066 }
0067 
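/*
 * Map a clock frequency to an index in the VCE (ECLK) clock/voltage
 * dependency table: for the soft/hard "min" messages this picks the lowest
 * level whose eclk is at least the requested clock, for the "max" messages
 * the highest level whose eclk does not exceed it.  The SCLK and UVD helpers
 * below follow the same pattern against their respective dependency tables.
 */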
0068 static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
0069                     uint32_t clock, uint32_t msg)
0070 {
0071     int i = 0;
0072     struct phm_vce_clock_voltage_dependency_table *ptable =
0073         hwmgr->dyn_state.vce_clock_voltage_dependency_table;
0074 
0075     switch (msg) {
0076     case PPSMC_MSG_SetEclkSoftMin:
0077     case PPSMC_MSG_SetEclkHardMin:
0078         for (i = 0; i < (int)ptable->count; i++) {
0079             if (clock <= ptable->entries[i].ecclk)
0080                 break;
0081         }
0082         break;
0083 
0084     case PPSMC_MSG_SetEclkSoftMax:
0085     case PPSMC_MSG_SetEclkHardMax:
0086         for (i = ptable->count - 1; i >= 0; i--) {
0087             if (clock >= ptable->entries[i].ecclk)
0088                 break;
0089         }
0090         break;
0091 
0092     default:
0093         break;
0094     }
0095 
0096     return i;
0097 }
0098 
0099 static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
0100                 uint32_t clock, uint32_t msg)
0101 {
0102     int i = 0;
0103     struct phm_clock_voltage_dependency_table *table =
0104                 hwmgr->dyn_state.vddc_dependency_on_sclk;
0105 
0106     switch (msg) {
0107     case PPSMC_MSG_SetSclkSoftMin:
0108     case PPSMC_MSG_SetSclkHardMin:
0109         for (i = 0; i < (int)table->count; i++) {
0110             if (clock <= table->entries[i].clk)
0111                 break;
0112         }
0113         break;
0114 
0115     case PPSMC_MSG_SetSclkSoftMax:
0116     case PPSMC_MSG_SetSclkHardMax:
0117         for (i = table->count - 1; i >= 0; i--) {
0118             if (clock >= table->entries[i].clk)
0119                 break;
0120         }
0121         break;
0122 
0123     default:
0124         break;
0125     }
0126     return i;
0127 }
0128 
0129 static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
0130                     uint32_t clock, uint32_t msg)
0131 {
0132     int i = 0;
0133     struct phm_uvd_clock_voltage_dependency_table *ptable =
0134         hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
0135 
0136     switch (msg) {
0137     case PPSMC_MSG_SetUvdSoftMin:
0138     case PPSMC_MSG_SetUvdHardMin:
0139         for (i = 0; i < (int)ptable->count; i++) {
0140             if (clock <= ptable->entries[i].vclk)
0141                 break;
0142         }
0143         break;
0144 
0145     case PPSMC_MSG_SetUvdSoftMax:
0146     case PPSMC_MSG_SetUvdHardMax:
0147         for (i = ptable->count - 1; i >= 0; i--) {
0148             if (clock >= ptable->entries[i].vclk)
0149                 break;
0150         }
0151         break;
0152 
0153     default:
0154         break;
0155     }
0156 
0157     return i;
0158 }
0159 
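/*
 * Query the SMU once for the highest SCLK DPM level and cache the result;
 * one is added to the firmware's reply, presumably so the cached value is a
 * level count rather than a top index (callers of this helper subtract one
 * again).
 */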
0160 static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
0161 {
0162     struct smu8_hwmgr *data = hwmgr->backend;
0163 
0164     if (data->max_sclk_level == 0) {
0165         smum_send_msg_to_smc(hwmgr,
0166                 PPSMC_MSG_GetMaxSclkLevel,
0167                 &data->max_sclk_level);
0168         data->max_sclk_level += 1;
0169     }
0170 
0171     return data->max_sclk_level;
0172 }
0173 
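/*
 * Populate the SMU8 backend with its default DPM tuning values (ramp step and
 * delay, clock-slowdown frequency, gating thresholds, voting rights clients)
 * and set or clear the relevant platform capability bits.  The UVD/VCE power
 * gating caps are only set when the corresponding AMD_PG_SUPPORT_* flags are
 * present in adev->pg_flags.
 */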
0174 static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
0175 {
0176     struct smu8_hwmgr *data = hwmgr->backend;
0177     struct amdgpu_device *adev = hwmgr->adev;
0178 
0179     data->gfx_ramp_step = 256*25/100;
0180     data->gfx_ramp_delay = 1; /* by default, we delay 1us */
0181 
0182     data->mgcg_cgtt_local0 = 0x00000000;
0183     data->mgcg_cgtt_local1 = 0x00000000;
0184     data->clock_slow_down_freq = 25000;
0185     data->skip_clock_slow_down = 1;
0186     data->enable_nb_ps_policy = 1; /* 1 = enabled (originally kept disabled until UNB was ready) */
0187     data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
0188     data->voting_rights_clients = 0x00C00033;
0189     data->static_screen_threshold = 8;
0190     data->ddi_power_gating_disabled = 0;
0191     data->bapm_enabled = 1;
0192     data->voltage_drop_threshold = 0;
0193     data->gfx_power_gating_threshold = 500;
0194     data->vce_slow_sclk_threshold = 20000;
0195     data->dce_slow_sclk_threshold = 30000;
0196     data->disable_driver_thermal_policy = 1;
0197     data->disable_nb_ps3_in_battery = 0;
0198 
0199     phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
0200                             PHM_PlatformCaps_ABM);
0201 
0202     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0203                     PHM_PlatformCaps_NonABMSupportInPPLib);
0204 
0205     phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
0206                     PHM_PlatformCaps_DynamicM3Arbiter);
0207 
0208     data->override_dynamic_mgpg = 1;
0209 
0210     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0211                   PHM_PlatformCaps_DynamicPatchPowerState);
0212 
0213     data->thermal_auto_throttling_treshold = 0;
0214     data->tdr_clock = 0;
0215     data->disable_gfx_power_gating_in_uvd = 0;
0216 
0217     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0218                     PHM_PlatformCaps_DynamicUVDState);
0219 
0220     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0221             PHM_PlatformCaps_UVDDPM);
0222     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0223             PHM_PlatformCaps_VCEDPM);
0224 
0225     data->cc6_settings.cpu_cc6_disable = false;
0226     data->cc6_settings.cpu_pstate_disable = false;
0227     data->cc6_settings.nb_pstate_switch_disable = false;
0228     data->cc6_settings.cpu_pstate_separation_time = 0;
0229 
0230     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0231                    PHM_PlatformCaps_DisableVoltageIsland);
0232 
0233     phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
0234               PHM_PlatformCaps_UVDPowerGating);
0235     phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
0236               PHM_PlatformCaps_VCEPowerGating);
0237 
0238     if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
0239         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0240                   PHM_PlatformCaps_UVDPowerGating);
0241     if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
0242         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0243                   PHM_PlatformCaps_VCEPowerGating);
0244 
0245 
0246     return 0;
0247 }
0248 
0249 /* convert from an 8-bit VID to a real voltage, in units of mV*4 */
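/* e.g. VID 64 (0x40): 6200 - 64 * 25 = 4600, i.e. 4600 / 4 = 1150 mV */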
0250 static uint32_t smu8_convert_8Bit_index_to_voltage(
0251             struct pp_hwmgr *hwmgr, uint16_t voltage)
0252 {
0253     return 6200 - (voltage * 25);
0254 }
0255 
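/*
 * Derive the maximum clock/voltage limits on AC power from the last (highest
 * clock) entry of the vddc-on-sclk dependency table; the memory clock limit
 * is taken from NB P-state memory clock 0.
 */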
0256 static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
0257             struct phm_clock_and_voltage_limits *table)
0258 {
0259     struct smu8_hwmgr *data = hwmgr->backend;
0260     struct smu8_sys_info *sys_info = &data->sys_info;
0261     struct phm_clock_voltage_dependency_table *dep_table =
0262                 hwmgr->dyn_state.vddc_dependency_on_sclk;
0263 
0264     if (dep_table->count > 0) {
0265         table->sclk = dep_table->entries[dep_table->count-1].clk;
0266         table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
0267            (uint16_t)dep_table->entries[dep_table->count-1].v);
0268     }
0269     table->mclk = sys_info->nbp_memory_clock[0];
0270     return 0;
0271 }
0272 
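/*
 * Build the 8-entry vddc-on-DAL-power-level table used by the display code:
 * each PP_DAL_POWERLEVEL_* label is paired with a voltage index equal to its
 * level number.  The ATOM display clock/voltage capability argument appears
 * to be unused here.
 */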
0273 static int smu8_init_dynamic_state_adjustment_rule_settings(
0274             struct pp_hwmgr *hwmgr,
0275             ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
0276 {
0277     struct phm_clock_voltage_dependency_table *table_clk_vlt;
0278 
0279     table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8),
0280                 GFP_KERNEL);
0281 
0282     if (NULL == table_clk_vlt) {
0283         pr_err("Can not allocate memory!\n");
0284         return -ENOMEM;
0285     }
0286 
0287     table_clk_vlt->count = 8;
0288     table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
0289     table_clk_vlt->entries[0].v = 0;
0290     table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
0291     table_clk_vlt->entries[1].v = 1;
0292     table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
0293     table_clk_vlt->entries[2].v = 2;
0294     table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
0295     table_clk_vlt->entries[3].v = 3;
0296     table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
0297     table_clk_vlt->entries[4].v = 4;
0298     table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
0299     table_clk_vlt->entries[5].v = 5;
0300     table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
0301     table_clk_vlt->entries[6].v = 6;
0302     table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
0303     table_clk_vlt->entries[7].v = 7;
0304     hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
0305 
0306     return 0;
0307 }
0308 
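/*
 * Read the VBIOS ATOM_INTEGRATED_SYSTEM_INFO table (only content revision 9
 * is accepted) and fill sys_info with boot-up clocks, HTC limits, NB P-state
 * clocks/voltages and display clocks, converting little-endian fields as
 * needed.  When NB DPM is not enabled, all NB P-states are cloned from
 * state 0.  Finally, the AC max power limits and the DAL adjustment table are
 * built from this data.
 */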
0309 static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
0310 {
0311     struct smu8_hwmgr *data = hwmgr->backend;
0312     ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
0313     uint32_t i;
0314     int result = 0;
0315     uint8_t frev, crev;
0316     uint16_t size;
0317 
0318     info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
0319             GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
0320             &size, &frev, &crev);
0321 
0322     if (info == NULL) {
0323         pr_err("Could not retrieve the Integrated System Info Table!\n");
0324         return -EINVAL;
0325     }
0326 
0327     if (crev != 9) {
0328         pr_err("Unsupported IGP table: %d %d\n", frev, crev);
0329         return -EINVAL;
0330     }
0331 
0332     data->sys_info.bootup_uma_clock =
0333                    le32_to_cpu(info->ulBootUpUMAClock);
0334 
0335     data->sys_info.bootup_engine_clock =
0336                 le32_to_cpu(info->ulBootUpEngineClock);
0337 
0338     data->sys_info.dentist_vco_freq =
0339                    le32_to_cpu(info->ulDentistVCOFreq);
0340 
0341     data->sys_info.system_config =
0342                      le32_to_cpu(info->ulSystemConfig);
0343 
0344     data->sys_info.bootup_nb_voltage_index =
0345                   le16_to_cpu(info->usBootUpNBVoltage);
0346 
0347     data->sys_info.htc_hyst_lmt =
0348             (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
0349 
0350     data->sys_info.htc_tmp_lmt =
0351             (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
0352 
0353     if (data->sys_info.htc_tmp_lmt <=
0354             data->sys_info.htc_hyst_lmt) {
0355         pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
0356         return -EINVAL;
0357     }
0358 
0359     data->sys_info.nb_dpm_enable =
0360                 data->enable_nb_ps_policy &&
0361                 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
0362 
0363     for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
0364         if (i < SMU8_NUM_NBPMEMORYCLOCK) {
0365             data->sys_info.nbp_memory_clock[i] =
0366               le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
0367         }
0368         data->sys_info.nbp_n_clock[i] =
0369                 le32_to_cpu(info->ulNbpStateNClkFreq[i]);
0370     }
0371 
0372     for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
0373         data->sys_info.display_clock[i] =
0374                     le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
0375     }
0376 
0377     /* Only 4 NB P-state levels are used here; make sure not to exceed the array */
0378     for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
0379         data->sys_info.nbp_voltage_index[i] =
0380                  le16_to_cpu(info->usNBPStateVoltage[i]);
0381     }
0382 
0383     if (!data->sys_info.nb_dpm_enable) {
0384         for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
0385             if (i < SMU8_NUM_NBPMEMORYCLOCK) {
0386                 data->sys_info.nbp_memory_clock[i] =
0387                     data->sys_info.nbp_memory_clock[0];
0388             }
0389             data->sys_info.nbp_n_clock[i] =
0390                     data->sys_info.nbp_n_clock[0];
0391             data->sys_info.nbp_voltage_index[i] =
0392                     data->sys_info.nbp_voltage_index[0];
0393         }
0394     }
0395 
0396     if (le32_to_cpu(info->ulGPUCapInfo) &
0397         SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
0398         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
0399                     PHM_PlatformCaps_EnableDFSBypass);
0400     }
0401 
0402     data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
0403 
0404     smu8_construct_max_power_limits_table(hwmgr,
0405                     &hwmgr->dyn_state.max_clock_voltage_on_ac);
0406 
0407     smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
0408                     &info->sDISPCLK_Voltage[0]);
0409 
0410     return result;
0411 }
0412 
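/*
 * Seed the boot power level from the VBIOS boot-up engine clock and NB
 * voltage index; the remaining fields are zeroed except allowGnbSlow.
 */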
0413 static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
0414 {
0415     struct smu8_hwmgr *data = hwmgr->backend;
0416 
0417     data->boot_power_level.engineClock =
0418                 data->sys_info.bootup_engine_clock;
0419 
0420     data->boot_power_level.vddcIndex =
0421             (uint8_t)data->sys_info.bootup_nb_voltage_index;
0422 
0423     data->boot_power_level.dsDividerIndex = 0;
0424     data->boot_power_level.ssDividerIndex = 0;
0425     data->boot_power_level.allowGnbSlow = 1;
0426     data->boot_power_level.forceNBPstate = 0;
0427     data->boot_power_level.hysteresis_up = 0;
0428     data->boot_power_level.numSIMDToPowerDown = 0;
0429     data->boot_power_level.display_wm = 0;
0430     data->boot_power_level.vce_wm = 0;
0431 
0432     return 0;
0433 }
0434 
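/*
 * When a powerplay table upload is required, download the SMU8_Fusion_ClkTable
 * from the SMU, patch each breakdown table (SCLK, ACLK, VCLK, DCLK, ECLK)
 * with the voltage IDs and frequencies from the corresponding dependency
 * tables, fill in the DFS post-dividers obtained from atomctrl, and upload
 * the table back to the SMU.
 */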
0435 static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
0436 {
0437     struct SMU8_Fusion_ClkTable *clock_table;
0438     int ret;
0439     uint32_t i;
0440     void *table = NULL;
0441     pp_atomctrl_clock_dividers_kong dividers;
0442 
0443     struct phm_clock_voltage_dependency_table *vddc_table =
0444         hwmgr->dyn_state.vddc_dependency_on_sclk;
0445     struct phm_clock_voltage_dependency_table *vdd_gfx_table =
0446         hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
0447     struct phm_acp_clock_voltage_dependency_table *acp_table =
0448         hwmgr->dyn_state.acp_clock_voltage_dependency_table;
0449     struct phm_uvd_clock_voltage_dependency_table *uvd_table =
0450         hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
0451     struct phm_vce_clock_voltage_dependency_table *vce_table =
0452         hwmgr->dyn_state.vce_clock_voltage_dependency_table;
0453 
0454     if (!hwmgr->need_pp_table_upload)
0455         return 0;
0456 
0457     ret = smum_download_powerplay_table(hwmgr, &table);
0458 
0459     PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
0460                 "Failed to get clock table from SMU!", return -EINVAL;);
0461 
0462     clock_table = (struct SMU8_Fusion_ClkTable *)table;
0463 
0464     /* patch clock table */
0465     PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
0466                 "Dependency table entry exceeds max limit!", return -EINVAL;);
0467     PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
0468                 "Dependency table entry exceeds max limit!", return -EINVAL;);
0469     PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
0470                 "Dependency table entry exceeds max limit!", return -EINVAL;);
0471     PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
0472                 "Dependency table entry exceeds max limit!", return -EINVAL;);
0473     PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
0474                 "Dependency table entry exceeds max limit!", return -EINVAL;);
0475 
0476     for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
0477 
0478         /* vddc_sclk */
0479         clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
0480             (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
0481         clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
0482             (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
0483 
0484         atomctrl_get_engine_pll_dividers_kong(hwmgr,
0485                               clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
0486                               &dividers);
0487 
0488         clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
0489             (uint8_t)dividers.pll_post_divider;
0490 
0491         /* vddgfx_sclk */
0492         clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
0493             (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
0494 
0495         /* acp breakdown */
0496         clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
0497             (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
0498         clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
0499             (i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
0500 
0501         atomctrl_get_engine_pll_dividers_kong(hwmgr,
0502                               clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
0503                               &dividers);
0504 
0505         clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
0506             (uint8_t)dividers.pll_post_divider;
0507 
0508 
0509         /* uvd breakdown */
0510         clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
0511             (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
0512         clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
0513             (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
0514 
0515         atomctrl_get_engine_pll_dividers_kong(hwmgr,
0516                               clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
0517                               &dividers);
0518 
0519         clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
0520             (uint8_t)dividers.pll_post_divider;
0521 
0522         clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
0523             (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
0524         clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
0525             (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
0526 
0527         atomctrl_get_engine_pll_dividers_kong(hwmgr,
0528                               clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
0529                               &dividers);
0530 
0531         clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
0532             (uint8_t)dividers.pll_post_divider;
0533 
0534         /* vce breakdown */
0535         clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
0536             (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
0537         clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
0538             (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
0539 
0540 
0541         atomctrl_get_engine_pll_dividers_kong(hwmgr,
0542                               clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
0543                               &dividers);
0544 
0545         clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
0546             (uint8_t)dividers.pll_post_divider;
0547 
0548     }
0549     ret = smum_upload_powerplay_table(hwmgr);
0550 
0551     return ret;
0552 }
0553 
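/*
 * Initialize the SCLK DPM limits: soft/hard minimums come from the first
 * entry of the vddc-on-sclk table, soft/hard maximums from the level reported
 * by the SMU (clamped to the table size).  The UVD, VCE and ACP helpers below
 * follow the same pattern with zero minimums and their own "get max level"
 * SMU messages.
 */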
0554 static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
0555 {
0556     struct smu8_hwmgr *data = hwmgr->backend;
0557     struct phm_clock_voltage_dependency_table *table =
0558                     hwmgr->dyn_state.vddc_dependency_on_sclk;
0559     unsigned long clock = 0, level;
0560 
0561     if (NULL == table || table->count <= 0)
0562         return -EINVAL;
0563 
0564     data->sclk_dpm.soft_min_clk = table->entries[0].clk;
0565     data->sclk_dpm.hard_min_clk = table->entries[0].clk;
0566 
0567     level = smu8_get_max_sclk_level(hwmgr) - 1;
0568 
0569     if (level < table->count)
0570         clock = table->entries[level].clk;
0571     else
0572         clock = table->entries[table->count - 1].clk;
0573 
0574     data->sclk_dpm.soft_max_clk = clock;
0575     data->sclk_dpm.hard_max_clk = clock;
0576 
0577     return 0;
0578 }
0579 
0580 static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
0581 {
0582     struct smu8_hwmgr *data = hwmgr->backend;
0583     struct phm_uvd_clock_voltage_dependency_table *table =
0584                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
0585     unsigned long clock = 0;
0586     uint32_t level;
0587 
0588     if (NULL == table || table->count <= 0)
0589         return -EINVAL;
0590 
0591     data->uvd_dpm.soft_min_clk = 0;
0592     data->uvd_dpm.hard_min_clk = 0;
0593 
0594     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
0595 
0596     if (level < table->count)
0597         clock = table->entries[level].vclk;
0598     else
0599         clock = table->entries[table->count - 1].vclk;
0600 
0601     data->uvd_dpm.soft_max_clk = clock;
0602     data->uvd_dpm.hard_max_clk = clock;
0603 
0604     return 0;
0605 }
0606 
0607 static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
0608 {
0609     struct smu8_hwmgr *data = hwmgr->backend;
0610     struct phm_vce_clock_voltage_dependency_table *table =
0611                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
0612     unsigned long clock = 0;
0613     uint32_t level;
0614 
0615     if (NULL == table || table->count <= 0)
0616         return -EINVAL;
0617 
0618     data->vce_dpm.soft_min_clk = 0;
0619     data->vce_dpm.hard_min_clk = 0;
0620 
0621     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
0622 
0623     if (level < table->count)
0624         clock = table->entries[level].ecclk;
0625     else
0626         clock = table->entries[table->count - 1].ecclk;
0627 
0628     data->vce_dpm.soft_max_clk = clock;
0629     data->vce_dpm.hard_max_clk = clock;
0630 
0631     return 0;
0632 }
0633 
0634 static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
0635 {
0636     struct smu8_hwmgr *data = hwmgr->backend;
0637     struct phm_acp_clock_voltage_dependency_table *table =
0638                 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
0639     unsigned long clock = 0;
0640     uint32_t level;
0641 
0642     if (NULL == table || table->count <= 0)
0643         return -EINVAL;
0644 
0645     data->acp_dpm.soft_min_clk = 0;
0646     data->acp_dpm.hard_min_clk = 0;
0647 
0648     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
0649 
0650     if (level < table->count)
0651         clock = table->entries[level].acpclk;
0652     else
0653         clock = table->entries[table->count - 1].acpclk;
0654 
0655     data->acp_dpm.soft_max_clk = clock;
0656     data->acp_dpm.hard_max_clk = clock;
0657     return 0;
0658 }
0659 
0660 static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
0661 {
0662     struct smu8_hwmgr *data = hwmgr->backend;
0663 
0664     data->uvd_power_gated = false;
0665     data->vce_power_gated = false;
0666     data->samu_power_gated = false;
0667 #ifdef CONFIG_DRM_AMD_ACP
0668     data->acp_power_gated = false;
0669 #else
0670     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
0671     data->acp_power_gated = true;
0672 #endif
0673 
0674 }
0675 
0676 static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
0677 {
0678     struct smu8_hwmgr *data = hwmgr->backend;
0679 
0680     data->low_sclk_interrupt_threshold = 0;
0681 }
0682 
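/*
 * Recompute the SCLK limits for the current display configuration: the soft
 * maximum comes from the SMU's max level, the hard minimum tracks
 * min_core_set_clock, and under the Stable P-State cap the soft minimum is
 * raised to 75% of max_clock_voltage_on_ac.mclk (the soft maximum is then
 * pinned to the same value).  Only values that actually changed are sent to
 * the SMU.
 */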
0683 static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
0684 {
0685     struct smu8_hwmgr *data = hwmgr->backend;
0686     struct phm_clock_voltage_dependency_table *table =
0687                     hwmgr->dyn_state.vddc_dependency_on_sclk;
0688 
0689     unsigned long clock = 0;
0690     unsigned long level;
0691     unsigned long stable_pstate_sclk;
0692     unsigned long percentage;
0693 
0694     data->sclk_dpm.soft_min_clk = table->entries[0].clk;
0695     level = smu8_get_max_sclk_level(hwmgr) - 1;
0696 
0697     if (level < table->count)
0698         data->sclk_dpm.soft_max_clk  = table->entries[level].clk;
0699     else
0700         data->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
0701 
0702     clock = hwmgr->display_config->min_core_set_clock;
0703     if (clock == 0)
0704         pr_debug("min_core_set_clock not set\n");
0705 
0706     if (data->sclk_dpm.hard_min_clk != clock) {
0707         data->sclk_dpm.hard_min_clk = clock;
0708 
0709         smum_send_msg_to_smc_with_parameter(hwmgr,
0710                         PPSMC_MSG_SetSclkHardMin,
0711                          smu8_get_sclk_level(hwmgr,
0712                     data->sclk_dpm.hard_min_clk,
0713                          PPSMC_MSG_SetSclkHardMin),
0714                          NULL);
0715     }
0716 
0717     clock = data->sclk_dpm.soft_min_clk;
0718 
0719     /* update minimum clocks for Stable P-State feature */
0720     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
0721                      PHM_PlatformCaps_StablePState)) {
0722         percentage = 75;
0723         /* Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
0724         stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
0725                     percentage) / 100;
0726 
0727         if (clock < stable_pstate_sclk)
0728             clock = stable_pstate_sclk;
0729     }
0730 
0731     if (data->sclk_dpm.soft_min_clk != clock) {
0732         data->sclk_dpm.soft_min_clk = clock;
0733         smum_send_msg_to_smc_with_parameter(hwmgr,
0734                         PPSMC_MSG_SetSclkSoftMin,
0735                         smu8_get_sclk_level(hwmgr,
0736                     data->sclk_dpm.soft_min_clk,
0737                          PPSMC_MSG_SetSclkSoftMin),
0738                         NULL);
0739     }
0740 
0741     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
0742                     PHM_PlatformCaps_StablePState) &&
0743              data->sclk_dpm.soft_max_clk != clock) {
0744         data->sclk_dpm.soft_max_clk = clock;
0745         smum_send_msg_to_smc_with_parameter(hwmgr,
0746                         PPSMC_MSG_SetSclkSoftMax,
0747                         smu8_get_sclk_level(hwmgr,
0748                     data->sclk_dpm.soft_max_clk,
0749                     PPSMC_MSG_SetSclkSoftMax),
0750                         NULL);
0751     }
0752 
0753     return 0;
0754 }
0755 
0756 static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
0757 {
0758     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
0759                 PHM_PlatformCaps_SclkDeepSleep)) {
0760         uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
0761         if (clks == 0)
0762             clks = SMU8_MIN_DEEP_SLEEP_SCLK;
0763 
0764         PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
0765 
0766         smum_send_msg_to_smc_with_parameter(hwmgr,
0767                 PPSMC_MSG_SetMinDeepSleepSclk,
0768                 clks,
0769                 NULL);
0770     }
0771 
0772     return 0;
0773 }
0774 
0775 static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
0776 {
0777     struct smu8_hwmgr *data =
0778                   hwmgr->backend;
0779 
0780     smum_send_msg_to_smc_with_parameter(hwmgr,
0781                     PPSMC_MSG_SetWatermarkFrequency,
0782                     data->sclk_dpm.soft_max_clk,
0783                     NULL);
0784 
0785     return 0;
0786 }
0787 
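/*
 * Toggle the low-memory P-state when NB DPM is enabled.  The lock flag is
 * forwarded as the parameter of the Enable/DisableLowMemoryPstate message.
 */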
0788 static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
0789 {
0790     struct smu8_hwmgr *hw_data = hwmgr->backend;
0791 
0792     if (hw_data->is_nb_dpm_enabled) {
0793         if (enable) {
0794             PP_DBG_LOG("enable Low Memory PState.\n");
0795 
0796             return smum_send_msg_to_smc_with_parameter(hwmgr,
0797                         PPSMC_MSG_EnableLowMemoryPstate,
0798                         (lock ? 1 : 0),
0799                         NULL);
0800         } else {
0801             PP_DBG_LOG("disable Low Memory PState.\n");
0802 
0803             return smum_send_msg_to_smc_with_parameter(hwmgr,
0804                         PPSMC_MSG_DisableLowMemoryPstate,
0805                         (lock ? 1 : 0),
0806                         NULL);
0807         }
0808     }
0809 
0810     return 0;
0811 }
0812 
0813 static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
0814 {
0815     int ret = 0;
0816 
0817     struct smu8_hwmgr *data = hwmgr->backend;
0818     unsigned long dpm_features = 0;
0819 
0820     if (data->is_nb_dpm_enabled) {
0821         smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
0822         dpm_features |= NB_DPM_MASK;
0823         ret = smum_send_msg_to_smc_with_parameter(
0824                               hwmgr,
0825                               PPSMC_MSG_DisableAllSmuFeatures,
0826                               dpm_features,
0827                               NULL);
0828         if (ret == 0)
0829             data->is_nb_dpm_enabled = false;
0830     }
0831 
0832     return ret;
0833 }
0834 
0835 static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
0836 {
0837     int ret = 0;
0838 
0839     struct smu8_hwmgr *data = hwmgr->backend;
0840     unsigned long dpm_features = 0;
0841 
0842     if (!data->is_nb_dpm_enabled) {
0843         PP_DBG_LOG("enabling ALL SMU features.\n");
0844         dpm_features |= NB_DPM_MASK;
0845         ret = smum_send_msg_to_smc_with_parameter(
0846                               hwmgr,
0847                               PPSMC_MSG_EnableAllSmuFeatures,
0848                               dpm_features,
0849                               NULL);
0850         if (ret == 0)
0851             data->is_nb_dpm_enabled = true;
0852     }
0853 
0854     return ret;
0855 }
0856 
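/*
 * Adjust the low-memory P-state for a newly requested power state: FORCE_HIGH
 * disables it, CANCEL_FORCE_HIGH re-enables it, and otherwise it follows the
 * nb_pstate_switch_disable setting from the CC6 configuration.
 */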
0857 static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
0858 {
0859     bool disable_switch;
0860     bool enable_low_mem_state;
0861     struct smu8_hwmgr *hw_data = hwmgr->backend;
0862     const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
0863     const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
0864 
0865     if (hw_data->sys_info.nb_dpm_enable) {
0866         disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
0867         enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
0868 
0869         if (pnew_state->action == FORCE_HIGH)
0870             smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
0871         else if (pnew_state->action == CANCEL_FORCE_HIGH)
0872             smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
0873         else
0874             smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
0875     }
0876     return 0;
0877 }
0878 
0879 static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
0880 {
0881     int ret = 0;
0882 
0883     smu8_update_sclk_limit(hwmgr);
0884     smu8_set_deep_sleep_sclk_threshold(hwmgr);
0885     smu8_set_watermark_threshold(hwmgr);
0886     ret = smu8_enable_nb_dpm(hwmgr);
0887     if (ret)
0888         return ret;
0889     smu8_update_low_mem_pstate(hwmgr, input);
0890 
0891     return 0;
0892 }
0893 
0894 
0895 static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
0896 {
0897     int ret;
0898 
0899     ret = smu8_upload_pptable_to_smu(hwmgr);
0900     if (ret)
0901         return ret;
0902     ret = smu8_init_sclk_limit(hwmgr);
0903     if (ret)
0904         return ret;
0905     ret = smu8_init_uvd_limit(hwmgr);
0906     if (ret)
0907         return ret;
0908     ret = smu8_init_vce_limit(hwmgr);
0909     if (ret)
0910         return ret;
0911     ret = smu8_init_acp_limit(hwmgr);
0912     if (ret)
0913         return ret;
0914 
0915     smu8_init_power_gate_state(hwmgr);
0916     smu8_init_sclk_threshold(hwmgr);
0917 
0918     return 0;
0919 }
0920 
0921 static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
0922 {
0923     struct smu8_hwmgr *hw_data = hwmgr->backend;
0924 
0925     hw_data->disp_clk_bypass_pending = false;
0926     hw_data->disp_clk_bypass = false;
0927 }
0928 
0929 static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
0930 {
0931     struct smu8_hwmgr *hw_data = hwmgr->backend;
0932 
0933     hw_data->is_nb_dpm_enabled = false;
0934 }
0935 
0936 static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
0937 {
0938     struct smu8_hwmgr *hw_data = hwmgr->backend;
0939 
0940     hw_data->cc6_settings.cc6_setting_changed = false;
0941     hw_data->cc6_settings.cpu_pstate_separation_time = 0;
0942     hw_data->cc6_settings.cpu_cc6_disable = false;
0943     hw_data->cc6_settings.cpu_pstate_disable = false;
0944 }
0945 
0946 static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
0947 {
0948     cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
0949                 ixCG_FREQ_TRAN_VOTING_0,
0950                 SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
0951 }
0952 
0953 static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
0954 {
0955     cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
0956                 ixCG_FREQ_TRAN_VOTING_0, 0);
0957 }
0958 
0959 static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
0960 {
0961     struct smu8_hwmgr *data = hwmgr->backend;
0962 
0963     data->dpm_flags |= DPMFlags_SCLK_Enabled;
0964 
0965     return smum_send_msg_to_smc_with_parameter(hwmgr,
0966                 PPSMC_MSG_EnableAllSmuFeatures,
0967                 SCLK_DPM_MASK,
0968                 NULL);
0969 }
0970 
0971 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
0972 {
0973     int ret = 0;
0974     struct smu8_hwmgr *data = hwmgr->backend;
0975     unsigned long dpm_features = 0;
0976 
0977     if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
0978         dpm_features |= SCLK_DPM_MASK;
0979         data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
0980         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
0981                     PPSMC_MSG_DisableAllSmuFeatures,
0982                     dpm_features,
0983                     NULL);
0984     }
0985     return ret;
0986 }
0987 
0988 static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
0989 {
0990     struct smu8_hwmgr *data = hwmgr->backend;
0991 
0992     data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
0993     data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
0994 
0995     smum_send_msg_to_smc_with_parameter(hwmgr,
0996                 PPSMC_MSG_SetSclkSoftMin,
0997                 smu8_get_sclk_level(hwmgr,
0998                 data->sclk_dpm.soft_min_clk,
0999                 PPSMC_MSG_SetSclkSoftMin),
1000                 NULL);
1001 
1002     smum_send_msg_to_smc_with_parameter(hwmgr,
1003                 PPSMC_MSG_SetSclkSoftMax,
1004                 smu8_get_sclk_level(hwmgr,
1005                 data->sclk_dpm.soft_max_clk,
1006                 PPSMC_MSG_SetSclkSoftMax),
1007                 NULL);
1008 
1009     return 0;
1010 }
1011 
1012 static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1013 {
1014     struct smu8_hwmgr *data = hwmgr->backend;
1015 
1016     data->acp_boot_level = 0xff;
1017 }
1018 
1019 static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1020 {
1021     smu8_program_voting_clients(hwmgr);
1022     if (smu8_start_dpm(hwmgr))
1023         return -EINVAL;
1024     smu8_program_bootup_state(hwmgr);
1025     smu8_reset_acp_boot_level(hwmgr);
1026 
1027     return 0;
1028 }
1029 
1030 static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1031 {
1032     smu8_disable_nb_dpm(hwmgr);
1033 
1034     smu8_clear_voting_clients(hwmgr);
1035     if (smu8_stop_dpm(hwmgr))
1036         return -EINVAL;
1037 
1038     return 0;
1039 }
1040 
1041 static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
1042 {
1043     smu8_disable_dpm_tasks(hwmgr);
1044     smu8_power_up_display_clock_sys_pll(hwmgr);
1045     smu8_clear_nb_dpm_flag(hwmgr);
1046     smu8_reset_cc6_data(hwmgr);
1047     return 0;
1048 }
1049 
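/*
 * Patch the requested power state: pick the memory clock from the display
 * configuration (falling back to NB P-state memory clock 1, or the AC maximum
 * under Stable P-State) and decide whether the high NB P-state must be forced
 * based on that clock and the number of active displays.  The resulting
 * action (FORCE_HIGH, CANCEL_FORCE_HIGH or DO_NOTHING) is consumed later by
 * smu8_update_low_mem_pstate().
 */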
1050 static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1051                 struct pp_power_state  *prequest_ps,
1052             const struct pp_power_state *pcurrent_ps)
1053 {
1054     struct smu8_power_state *smu8_ps =
1055                 cast_smu8_power_state(&prequest_ps->hardware);
1056 
1057     const struct smu8_power_state *smu8_current_ps =
1058                 cast_const_smu8_power_state(&pcurrent_ps->hardware);
1059 
1060     struct smu8_hwmgr *data = hwmgr->backend;
1061     struct PP_Clocks clocks = {0, 0, 0, 0};
1062     bool force_high;
1063 
1064     smu8_ps->need_dfs_bypass = true;
1065 
1066     data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1067 
1068     clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
1069                 hwmgr->display_config->min_mem_set_clock :
1070                 data->sys_info.nbp_memory_clock[1];
1071 
1072 
1073     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1074         clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1075 
1076     force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
1077             || (hwmgr->display_config->num_display >= 3);
1078 
1079     smu8_ps->action = smu8_current_ps->action;
1080 
1081     if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1082         smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
1083     else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1084         smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1085     else if (!force_high && (smu8_ps->action == FORCE_HIGH))
1086         smu8_ps->action = CANCEL_FORCE_HIGH;
1087     else if (force_high && (smu8_ps->action != FORCE_HIGH))
1088         smu8_ps->action = FORCE_HIGH;
1089     else
1090         smu8_ps->action = DO_NOTHING;
1091 
1092     return 0;
1093 }
1094 
1095 static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1096 {
1097     int result = 0;
1098     struct smu8_hwmgr *data;
1099 
1100     data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
1101     if (data == NULL)
1102         return -ENOMEM;
1103 
1104     hwmgr->backend = data;
1105 
1106     result = smu8_initialize_dpm_defaults(hwmgr);
1107     if (result != 0) {
1108         pr_err("smu8_initialize_dpm_defaults failed\n");
1109         return result;
1110     }
1111 
1112     result = smu8_get_system_info_data(hwmgr);
1113     if (result != 0) {
1114         pr_err("smu8_get_system_info_data failed\n");
1115         return result;
1116     }
1117 
1118     smu8_construct_boot_state(hwmgr);
1119 
1120     hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  SMU8_MAX_HARDWARE_POWERLEVELS;
1121 
1122     return result;
1123 }
1124 
1125 static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1126 {
1127     if (hwmgr != NULL) {
1128         kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
1129         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1130 
1131         kfree(hwmgr->backend);
1132         hwmgr->backend = NULL;
1133     }
1134     return 0;
1135 }
1136 
1137 static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1138 {
1139     struct smu8_hwmgr *data = hwmgr->backend;
1140 
1141     smum_send_msg_to_smc_with_parameter(hwmgr,
1142                     PPSMC_MSG_SetSclkSoftMin,
1143                     smu8_get_sclk_level(hwmgr,
1144                     data->sclk_dpm.soft_max_clk,
1145                     PPSMC_MSG_SetSclkSoftMin),
1146                     NULL);
1147 
1148     smum_send_msg_to_smc_with_parameter(hwmgr,
1149                 PPSMC_MSG_SetSclkSoftMax,
1150                 smu8_get_sclk_level(hwmgr,
1151                 data->sclk_dpm.soft_max_clk,
1152                 PPSMC_MSG_SetSclkSoftMax),
1153                 NULL);
1154 
1155     return 0;
1156 }
1157 
1158 static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1159 {
1160     struct smu8_hwmgr *data = hwmgr->backend;
1161     struct phm_clock_voltage_dependency_table *table =
1162                 hwmgr->dyn_state.vddc_dependency_on_sclk;
1163     unsigned long clock = 0, level;
1164 
1165     if (NULL == table || table->count <= 0)
1166         return -EINVAL;
1167 
1168     data->sclk_dpm.soft_min_clk = table->entries[0].clk;
1169     data->sclk_dpm.hard_min_clk = table->entries[0].clk;
1170     hwmgr->pstate_sclk = table->entries[0].clk;
1171     hwmgr->pstate_mclk = 0;
1172 
1173     level = smu8_get_max_sclk_level(hwmgr) - 1;
1174 
1175     if (level < table->count)
1176         clock = table->entries[level].clk;
1177     else
1178         clock = table->entries[table->count - 1].clk;
1179 
1180     data->sclk_dpm.soft_max_clk = clock;
1181     data->sclk_dpm.hard_max_clk = clock;
1182 
1183     smum_send_msg_to_smc_with_parameter(hwmgr,
1184                 PPSMC_MSG_SetSclkSoftMin,
1185                 smu8_get_sclk_level(hwmgr,
1186                 data->sclk_dpm.soft_min_clk,
1187                 PPSMC_MSG_SetSclkSoftMin),
1188                 NULL);
1189 
1190     smum_send_msg_to_smc_with_parameter(hwmgr,
1191                 PPSMC_MSG_SetSclkSoftMax,
1192                 smu8_get_sclk_level(hwmgr,
1193                 data->sclk_dpm.soft_max_clk,
1194                 PPSMC_MSG_SetSclkSoftMax),
1195                 NULL);
1196 
1197     return 0;
1198 }
1199 
1200 static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1201 {
1202     struct smu8_hwmgr *data = hwmgr->backend;
1203 
1204     smum_send_msg_to_smc_with_parameter(hwmgr,
1205             PPSMC_MSG_SetSclkSoftMax,
1206             smu8_get_sclk_level(hwmgr,
1207             data->sclk_dpm.soft_min_clk,
1208             PPSMC_MSG_SetSclkSoftMax),
1209             NULL);
1210 
1211     smum_send_msg_to_smc_with_parameter(hwmgr,
1212                 PPSMC_MSG_SetSclkSoftMin,
1213                 smu8_get_sclk_level(hwmgr,
1214                 data->sclk_dpm.soft_min_clk,
1215                 PPSMC_MSG_SetSclkSoftMin),
1216                 NULL);
1217 
1218     return 0;
1219 }
1220 
1221 static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1222                 enum amd_dpm_forced_level level)
1223 {
1224     int ret = 0;
1225 
1226     switch (level) {
1227     case AMD_DPM_FORCED_LEVEL_HIGH:
1228     case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1229         ret = smu8_phm_force_dpm_highest(hwmgr);
1230         break;
1231     case AMD_DPM_FORCED_LEVEL_LOW:
1232     case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1233     case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1234         ret = smu8_phm_force_dpm_lowest(hwmgr);
1235         break;
1236     case AMD_DPM_FORCED_LEVEL_AUTO:
1237         ret = smu8_phm_unforce_dpm_levels(hwmgr);
1238         break;
1239     case AMD_DPM_FORCED_LEVEL_MANUAL:
1240     case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1241     default:
1242         break;
1243     }
1244 
1245     return ret;
1246 }
1247 
1248 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1249 {
1250     if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1251         return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
1252     return 0;
1253 }
1254 
1255 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1256 {
1257     if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1258         return smum_send_msg_to_smc_with_parameter(
1259             hwmgr,
1260             PPSMC_MSG_UVDPowerON,
1261             PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
1262             NULL);
1263     }
1264 
1265     return 0;
1266 }
1267 
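/*
 * With Stable P-State or a UMD p-state active, pin the ECLK hard minimum to
 * the highest entry of the VCE dependency table; otherwise clear the hard
 * minimum and raise the soft minimum to level 1, which avoids the ECLK DPM0
 * hang described in the comment below.
 */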
1268 static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1269 {
1270     struct smu8_hwmgr *data = hwmgr->backend;
1271     struct phm_vce_clock_voltage_dependency_table *ptable =
1272         hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1273 
1274     /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1275     if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1276         hwmgr->en_umd_pstate) {
1277         data->vce_dpm.hard_min_clk =
1278                   ptable->entries[ptable->count - 1].ecclk;
1279 
1280         smum_send_msg_to_smc_with_parameter(hwmgr,
1281             PPSMC_MSG_SetEclkHardMin,
1282             smu8_get_eclk_level(hwmgr,
1283                 data->vce_dpm.hard_min_clk,
1284                 PPSMC_MSG_SetEclkHardMin),
1285             NULL);
1286     } else {
1287 
1288         smum_send_msg_to_smc_with_parameter(hwmgr,
1289                     PPSMC_MSG_SetEclkHardMin,
1290                     0,
1291                     NULL);
1292         /* disable ECLK DPM 0. Otherwise VCE could hang if
1293          * switching SCLK from DPM 0 to 6/7 */
1294         smum_send_msg_to_smc_with_parameter(hwmgr,
1295                     PPSMC_MSG_SetEclkSoftMin,
1296                     1,
1297                     NULL);
1298     }
1299     return 0;
1300 }
1301 
1302 static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1303 {
1304     if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1305         return smum_send_msg_to_smc(hwmgr,
1306                         PPSMC_MSG_VCEPowerOFF,
1307                         NULL);
1308     return 0;
1309 }
1310 
1311 static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1312 {
1313     if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1314         return smum_send_msg_to_smc(hwmgr,
1315                         PPSMC_MSG_VCEPowerON,
1316                         NULL);
1317     return 0;
1318 }
1319 
1320 static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1321 {
1322     struct smu8_hwmgr *data = hwmgr->backend;
1323 
1324     return data->sys_info.bootup_uma_clock;
1325 }
1326 
1327 static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1328 {
1329     struct pp_power_state  *ps;
1330     struct smu8_power_state  *smu8_ps;
1331 
1332     if (hwmgr == NULL)
1333         return -EINVAL;
1334 
1335     ps = hwmgr->request_ps;
1336 
1337     if (ps == NULL)
1338         return -EINVAL;
1339 
1340     smu8_ps = cast_smu8_power_state(&ps->hardware);
1341 
1342     if (low)
1343         return smu8_ps->levels[0].engineClock;
1344     else
1345         return smu8_ps->levels[smu8_ps->level-1].engineClock;
1346 }
1347 
1348 static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1349                     struct pp_hw_power_state *hw_ps)
1350 {
1351     struct smu8_hwmgr *data = hwmgr->backend;
1352     struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1353 
1354     smu8_ps->level = 1;
1355     smu8_ps->nbps_flags = 0;
1356     smu8_ps->bapm_flags = 0;
1357     smu8_ps->levels[0] = data->boot_power_level;
1358 
1359     return 0;
1360 }
1361 
1362 static int smu8_dpm_get_pp_table_entry_callback(
1363                              struct pp_hwmgr *hwmgr,
1364                        struct pp_hw_power_state *hw_ps,
1365                               unsigned int index,
1366                              const void *clock_info)
1367 {
1368     struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1369 
1370     const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
1371 
1372     struct phm_clock_voltage_dependency_table *table =
1373                     hwmgr->dyn_state.vddc_dependency_on_sclk;
1374     uint8_t clock_info_index = smu8_clock_info->index;
1375 
1376     if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1377         clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1378 
1379     smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1380     smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1381 
1382     smu8_ps->level = index + 1;
1383 
1384     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1385         smu8_ps->levels[index].dsDividerIndex = 5;
1386         smu8_ps->levels[index].ssDividerIndex = 5;
1387     }
1388 
1389     return 0;
1390 }
1391 
1392 static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1393 {
1394     int result;
1395     unsigned long ret = 0;
1396 
1397     result = pp_tables_get_num_of_entries(hwmgr, &ret);
1398 
1399     return result ? 0 : ret;
1400 }
1401 
1402 static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1403             unsigned long entry, struct pp_power_state *ps)
1404 {
1405     int result;
1406     struct smu8_power_state *smu8_ps;
1407 
1408     ps->hardware.magic = smu8_magic;
1409 
1410     smu8_ps = cast_smu8_power_state(&(ps->hardware));
1411 
1412     result = pp_tables_get_entry(hwmgr, entry, ps,
1413             smu8_dpm_get_pp_table_entry_callback);
1414 
1415     smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1416     smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1417 
1418     return result;
1419 }
1420 
1421 static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1422 {
1423     return sizeof(struct smu8_power_state);
1424 }
1425 
1426 static void smu8_hw_print_display_cfg(
1427     const struct cc6_settings *cc6_settings)
1428 {
1429     PP_DBG_LOG("New Display Configuration:\n");
1430 
1431     PP_DBG_LOG("   cpu_cc6_disable: %d\n",
1432             cc6_settings->cpu_cc6_disable);
1433     PP_DBG_LOG("   cpu_pstate_disable: %d\n",
1434             cc6_settings->cpu_pstate_disable);
1435     PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
1436             cc6_settings->nb_pstate_switch_disable);
1437     PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
1438             cc6_settings->cpu_pstate_separation_time);
1439 }
1440 
1441 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1442 {
1443     struct smu8_hwmgr *hw_data = hwmgr->backend;
1444     uint32_t data = 0;
1445 
1446     if (hw_data->cc6_settings.cc6_setting_changed) {
1447 
1448         hw_data->cc6_settings.cc6_setting_changed = false;
1449 
1450         smu8_hw_print_display_cfg(&hw_data->cc6_settings);
1451 
1452         data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1453             & PWRMGT_SEPARATION_TIME_MASK)
1454             << PWRMGT_SEPARATION_TIME_SHIFT;
1455 
1456         data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
1457             << PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
1458 
1459         data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
1460             << PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
1461 
1462         PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
1463             data);
1464 
1465         smum_send_msg_to_smc_with_parameter(hwmgr,
1466                         PPSMC_MSG_SetDisplaySizePowerParams,
1467                         data,
1468                         NULL);
1469     }
1470 
1471     return 0;
1472 }
1473 
1474 
1475 static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1476             bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1477 {
1478     struct smu8_hwmgr *hw_data = hwmgr->backend;
1479 
1480     if (separation_time !=
1481         hw_data->cc6_settings.cpu_pstate_separation_time ||
1482         cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
1483         pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
1484         pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
1485 
1486         hw_data->cc6_settings.cc6_setting_changed = true;
1487 
1488         hw_data->cc6_settings.cpu_pstate_separation_time =
1489             separation_time;
1490         hw_data->cc6_settings.cpu_cc6_disable =
1491             cc6_disable;
1492         hw_data->cc6_settings.cpu_pstate_disable =
1493             pstate_disable;
1494         hw_data->cc6_settings.nb_pstate_switch_disable =
1495             pstate_switch_disable;
1496 
1497     }
1498 
1499     return 0;
1500 }
1501 
1502 static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1503         struct amd_pp_simple_clock_info *info)
1504 {
1505     uint32_t i;
1506     const struct phm_clock_voltage_dependency_table *table =
1507             hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1508     const struct phm_clock_and_voltage_limits *limits =
1509             &hwmgr->dyn_state.max_clock_voltage_on_ac;
1510 
1511     info->engine_max_clock = limits->sclk;
1512     info->memory_max_clock = limits->mclk;
1513 
1514     for (i = table->count - 1; i > 0; i--) {
1515         if (limits->vddc >= table->entries[i].v) {
1516             info->level = table->entries[i].clk;
1517             return 0;
1518         }
1519     }
1520     return -EINVAL;
1521 }
1522 
1523 static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
1524         enum pp_clock_type type, uint32_t mask)
1525 {
1526     switch (type) {
1527     case PP_SCLK:
1528         smum_send_msg_to_smc_with_parameter(hwmgr,
1529                 PPSMC_MSG_SetSclkSoftMin,
1530                 mask,
1531                 NULL);
1532         smum_send_msg_to_smc_with_parameter(hwmgr,
1533                 PPSMC_MSG_SetSclkSoftMax,
1534                 mask,
1535                 NULL);
1536         break;
1537     default:
1538         break;
1539     }
1540 
1541     return 0;
1542 }
1543 
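/*
 * Emit the sysfs-style clock level listing.  The currently selected SCLK/MCLK
 * index is read from the TARGET_AND_CURRENT_PROFILE_INDEX register; MCLK
 * levels are printed from the NB P-state memory clock array in reverse order.
 */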
1544 static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
1545         enum pp_clock_type type, char *buf)
1546 {
1547     struct smu8_hwmgr *data = hwmgr->backend;
1548     struct phm_clock_voltage_dependency_table *sclk_table =
1549             hwmgr->dyn_state.vddc_dependency_on_sclk;
1550     uint32_t i, now;
1551     int size = 0;
1552 
1553     switch (type) {
1554     case PP_SCLK:
1555         now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1556                 CGS_IND_REG__SMC,
1557                 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1558                 TARGET_AND_CURRENT_PROFILE_INDEX,
1559                 CURR_SCLK_INDEX);
1560 
1561         for (i = 0; i < sclk_table->count; i++)
1562             size += sprintf(buf + size, "%d: %uMhz %s\n",
1563                     i, sclk_table->entries[i].clk / 100,
1564                     (i == now) ? "*" : "");
1565         break;
1566     case PP_MCLK:
1567         now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1568                 CGS_IND_REG__SMC,
1569                 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1570                 TARGET_AND_CURRENT_PROFILE_INDEX,
1571                 CURR_MCLK_INDEX);
1572 
1573         for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
1574             size += sprintf(buf + size, "%u: %uMhz %s\n",
1575                     SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1576                     (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1577         break;
1578     default:
1579         break;
1580     }
1581     return size;
1582 }
1583 
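/*
 * Describe one performance level of a power state (engine clock, NB
 * memory clock, vddc) for the display code.  For the power-containment
 * designation the first level above the DCE slow-sclk threshold is
 * preferred.
 */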
1584 static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1585                 PHM_PerformanceLevelDesignation designation, uint32_t index,
1586                 PHM_PerformanceLevel *level)
1587 {
1588     const struct smu8_power_state *ps;
1589     struct smu8_hwmgr *data;
1590     uint32_t level_index;
1591     uint32_t i;
1592 
1593     if (level == NULL || hwmgr == NULL || state == NULL)
1594         return -EINVAL;
1595 
1596     data = hwmgr->backend;
1597     ps = cast_const_smu8_power_state(state);
1598 
1599     level_index = index > ps->level - 1 ? ps->level - 1 : index;
1600     level->coreClock = ps->levels[level_index].engineClock;
1601 
1602     if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
1603         for (i = 1; i < ps->level; i++) {
1604             if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
1605                 level->coreClock = ps->levels[i].engineClock;
1606                 break;
1607             }
1608         }
1609     }
1610 
1611     if (level_index == 0)
1612         level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
1613     else
1614         level->memory_clock = data->sys_info.nbp_memory_clock[0];
1615 
1616     level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1617     level->nonLocalMemoryFreq = 0;
1618     level->nonLocalMemoryWidth = 0;
1619 
1620     return 0;
1621 }
1622 
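/*
 * Shallow-sleep clock range: each bound is the level's engine clock
 * divided by 2^ssDividerIndex.
 */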
1623 static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1624     const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1625 {
1626     const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1627 
1628     clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1629     clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
1630 
1631     return 0;
1632 }
1633 
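/*
 * Enumerate display, system (sclk) or NB memory clocks.  The "* 10"
 * presumably converts the tables' 10 kHz units to kHz; the NB memory
 * array is walked in reverse order.
 */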
1634 static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1635                         struct amd_pp_clocks *clocks)
1636 {
1637     struct smu8_hwmgr *data = hwmgr->backend;
1638     int i;
1639     struct phm_clock_voltage_dependency_table *table;
1640 
1641     clocks->count = smu8_get_max_sclk_level(hwmgr);
1642     switch (type) {
1643     case amd_pp_disp_clock:
1644         for (i = 0; i < clocks->count; i++)
1645             clocks->clock[i] = data->sys_info.display_clock[i] * 10;
1646         break;
1647     case amd_pp_sys_clock:
1648         table = hwmgr->dyn_state.vddc_dependency_on_sclk;
1649         for (i = 0; i < clocks->count; i++)
1650             clocks->clock[i] = table->entries[i].clk * 10;
1651         break;
1652     case amd_pp_mem_clock:
1653         clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1654         for (i = 0; i < clocks->count; i++)
1655             clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
1656         break;
1657     default:
1658         return -1;
1659     }
1660 
1661     return 0;
1662 }
1663 
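/*
 * Highest reachable engine clock (clamped to the vddc/sclk dependency
 * table) together with the AC memory clock limit.
 */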
1664 static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1665 {
1666     struct phm_clock_voltage_dependency_table *table =
1667                     hwmgr->dyn_state.vddc_dependency_on_sclk;
1668     unsigned long level;
1669     const struct phm_clock_and_voltage_limits *limits =
1670             &hwmgr->dyn_state.max_clock_voltage_on_ac;
1671 
1672     if (!table || table->count == 0 || !clocks)
1673         return -EINVAL;
1674 
1675     level = smu8_get_max_sclk_level(hwmgr) - 1;
1676 
1677     if (level < table->count)
1678         clocks->engine_max_clock = table->entries[level].clk;
1679     else
1680         clocks->engine_max_clock = table->entries[table->count - 1].clk;
1681 
1682     clocks->memory_max_clock = limits->mclk;
1683 
1684     return 0;
1685 }
1686 
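/*
 * Read the current temperature from the thermal controller.  The raw
 * field is in 1/8 degree C steps, with a -49 C offset when the range
 * select bit is set; the result is scaled by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */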
1687 static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1688 {
1689     int actual_temp = 0;
1690     uint32_t val = cgs_read_ind_register(hwmgr->device,
1691                          CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
1692     uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
1693 
1694     if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
1695         actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1696     else
1697         actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1698 
1699     return actual_temp;
1700 }
1701 
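/*
 * Generic sensor backend: current clocks, voltages, GPU load, UVD/VCE
 * power-gating state and temperature.  Every sensor handled here
 * returns a single 32-bit value.
 */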
1702 static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1703               void *value, int *size)
1704 {
1705     struct smu8_hwmgr *data = hwmgr->backend;
1706 
1707     struct phm_clock_voltage_dependency_table *table =
1708                 hwmgr->dyn_state.vddc_dependency_on_sclk;
1709 
1710     struct phm_vce_clock_voltage_dependency_table *vce_table =
1711         hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1712 
1713     struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1714         hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1715 
1716     uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1717                     TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1718     uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1719                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1720     uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1721                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1722 
1723     uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1724     uint16_t vddnb, vddgfx;
1725     int result;
1726 
1727     /* size must be at least 4 bytes for all sensors */
1728     if (*size < 4)
1729         return -EINVAL;
1730     *size = 4;
1731 
1732     switch (idx) {
1733     case AMDGPU_PP_SENSOR_GFX_SCLK:
1734         if (sclk_index < NUM_SCLK_LEVELS) {
1735             sclk = table->entries[sclk_index].clk;
1736             *((uint32_t *)value) = sclk;
1737             return 0;
1738         }
1739         return -EINVAL;
1740     case AMDGPU_PP_SENSOR_VDDNB:
1741         tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1742             CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1743         vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
1744         *((uint32_t *)value) = vddnb;
1745         return 0;
1746     case AMDGPU_PP_SENSOR_VDDGFX:
1747         tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1748             CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1749         vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
1750         *((uint32_t *)value) = vddgfx;
1751         return 0;
1752     case AMDGPU_PP_SENSOR_UVD_VCLK:
1753         if (!data->uvd_power_gated) {
1754             if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1755                 return -EINVAL;
1756             } else {
1757                 vclk = uvd_table->entries[uvd_index].vclk;
1758                 *((uint32_t *)value) = vclk;
1759                 return 0;
1760             }
1761         }
1762         *((uint32_t *)value) = 0;
1763         return 0;
1764     case AMDGPU_PP_SENSOR_UVD_DCLK:
1765         if (!data->uvd_power_gated) {
1766             if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1767                 return -EINVAL;
1768             } else {
1769                 dclk = uvd_table->entries[uvd_index].dclk;
1770                 *((uint32_t *)value) = dclk;
1771                 return 0;
1772             }
1773         }
1774         *((uint32_t *)value) = 0;
1775         return 0;
1776     case AMDGPU_PP_SENSOR_VCE_ECCLK:
1777         if (!data->vce_power_gated) {
1778             if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1779                 return -EINVAL;
1780             } else {
1781                 ecclk = vce_table->entries[vce_index].ecclk;
1782                 *((uint32_t *)value) = ecclk;
1783                 return 0;
1784             }
1785         }
1786         *((uint32_t *)value) = 0;
1787         return 0;
1788     case AMDGPU_PP_SENSOR_GPU_LOAD:
1789         result = smum_send_msg_to_smc(hwmgr,
1790                 PPSMC_MSG_GetAverageGraphicsActivity,
1791                 &activity_percent);
1792         if (result == 0)
1793             activity_percent = activity_percent > 100 ? 100 : activity_percent;
1794         else
1795             return -EIO;
1796         *((uint32_t *)value) = activity_percent;
1797         return 0;
1798     case AMDGPU_PP_SENSOR_UVD_POWER:
1799         *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1800         return 0;
1801     case AMDGPU_PP_SENSOR_VCE_POWER:
1802         *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1803         return 0;
1804     case AMDGPU_PP_SENSOR_GPU_TEMP:
1805         *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
1806         return 0;
1807     default:
1808         return -EOPNOTSUPP;
1809     }
1810 }
1811 
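/*
 * Tell the SMU firmware where the CAC buffer lives and how large it is.
 * Note that the mc_addr_* values go out via the *Virtual messages and
 * the virtual_addr_* values via the *Physical messages, exactly as the
 * parameters are wired below.
 */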
1812 static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1813                     uint32_t virtual_addr_low,
1814                     uint32_t virtual_addr_hi,
1815                     uint32_t mc_addr_low,
1816                     uint32_t mc_addr_hi,
1817                     uint32_t size)
1818 {
1819     smum_send_msg_to_smc_with_parameter(hwmgr,
1820                     PPSMC_MSG_DramAddrHiVirtual,
1821                     mc_addr_hi,
1822                     NULL);
1823     smum_send_msg_to_smc_with_parameter(hwmgr,
1824                     PPSMC_MSG_DramAddrLoVirtual,
1825                     mc_addr_low,
1826                     NULL);
1827     smum_send_msg_to_smc_with_parameter(hwmgr,
1828                     PPSMC_MSG_DramAddrHiPhysical,
1829                     virtual_addr_hi,
1830                     NULL);
1831     smum_send_msg_to_smc_with_parameter(hwmgr,
1832                     PPSMC_MSG_DramAddrLoPhysical,
1833                     virtual_addr_low,
1834                     NULL);
1835 
1836     smum_send_msg_to_smc_with_parameter(hwmgr,
1837                     PPSMC_MSG_DramBufferSize,
1838                     size,
1839                     NULL);
1840     return 0;
1841 }
1842 
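/*
 * Start from the generic SMU7 thermal policy and override the upper
 * limit with the auto-throttling threshold plus the HTC hysteresis
 * reported in the system info table.
 */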
1843 static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1844         struct PP_TemperatureRange *thermal_data)
1845 {
1846     struct smu8_hwmgr *data = hwmgr->backend;
1847 
1848     memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1849 
1850     thermal_data->max = (data->thermal_auto_throttling_treshold +
1851             data->sys_info.htc_hyst_lmt) *
1852             PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1853 
1854     return 0;
1855 }
1856 
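/*
 * Toggle the UVD DPM feature in the SMU (enable only if the platform
 * cap allows it) and mirror the state in dpm_flags.
 */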
1857 static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
1858 {
1859     struct smu8_hwmgr *data = hwmgr->backend;
1860     uint32_t dpm_features = 0;
1861 
1862     if (enable &&
1863         phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1864                   PHM_PlatformCaps_UVDDPM)) {
1865         data->dpm_flags |= DPMFlags_UVD_Enabled;
1866         dpm_features |= UVD_DPM_MASK;
1867         smum_send_msg_to_smc_with_parameter(hwmgr,
1868                 PPSMC_MSG_EnableAllSmuFeatures,
1869                 dpm_features,
1870                 NULL);
1871     } else {
1872         dpm_features |= UVD_DPM_MASK;
1873         data->dpm_flags &= ~DPMFlags_UVD_Enabled;
1874         smum_send_msg_to_smc_with_parameter(hwmgr,
1875                PPSMC_MSG_DisableAllSmuFeatures,
1876                dpm_features,
1877                NULL);
1878     }
1879     return 0;
1880 }
1881 
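/*
 * Re-evaluate UVD DPM when the block is gated/ungated; with a stable or
 * UMD pstate requested the hard minimum is pinned to the highest vclk
 * in the dependency table before DPM is re-enabled.
 */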
1882 static int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1883 {
1884     struct smu8_hwmgr *data = hwmgr->backend;
1885     struct phm_uvd_clock_voltage_dependency_table *ptable =
1886         hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1887 
1888     if (!bgate) {
1889         /* With a stable pstate (or UMD pstate) requested, pin UVD DPM to its highest level */
1890         if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1891             hwmgr->en_umd_pstate) {
1892             data->uvd_dpm.hard_min_clk =
1893                    ptable->entries[ptable->count - 1].vclk;
1894 
1895             smum_send_msg_to_smc_with_parameter(hwmgr,
1896                 PPSMC_MSG_SetUvdHardMin,
1897                 smu8_get_uvd_level(hwmgr,
1898                     data->uvd_dpm.hard_min_clk,
1899                     PPSMC_MSG_SetUvdHardMin),
1900                 NULL);
1901 
1902             smu8_enable_disable_uvd_dpm(hwmgr, true);
1903         } else {
1904             smu8_enable_disable_uvd_dpm(hwmgr, true);
1905         }
1906     } else {
1907         smu8_enable_disable_uvd_dpm(hwmgr, false);
1908     }
1909 
1910     return 0;
1911 }
1912 
1913 static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1914 {
1915     struct smu8_hwmgr *data = hwmgr->backend;
1916     uint32_t dpm_features = 0;
1917 
1918     if (enable && phm_cap_enabled(
1919                 hwmgr->platform_descriptor.platformCaps,
1920                 PHM_PlatformCaps_VCEDPM)) {
1921         data->dpm_flags |= DPMFlags_VCE_Enabled;
1922         dpm_features |= VCE_DPM_MASK;
1923         smum_send_msg_to_smc_with_parameter(hwmgr,
1924                 PPSMC_MSG_EnableAllSmuFeatures,
1925                 dpm_features,
1926                 NULL);
1927     } else {
1928         dpm_features |= VCE_DPM_MASK;
1929         data->dpm_flags &= ~DPMFlags_VCE_Enabled;
1930         smum_send_msg_to_smc_with_parameter(hwmgr,
1931                PPSMC_MSG_DisableAllSmuFeatures,
1932                dpm_features,
1933                NULL);
1934     }
1935 
1936     return 0;
1937 }
1938 
1939 
1940 static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
1941 {
1942     struct smu8_hwmgr *data = hwmgr->backend;
1943 
1944     if (data->acp_power_gated == bgate)
1945         return;
1946 
1947     if (bgate)
1948         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
1949     else
1950         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
1951 }
1952 
1953 #define WIDTH_4K        3840
1954 
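/*
 * Power-gate or un-gate UVD.  When gating, the IP block is gated and
 * UVD DPM dropped before powering the block down; when un-gating, the
 * block is powered up first and DPM restored afterwards.
 */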
1955 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
1956 {
1957     struct smu8_hwmgr *data = hwmgr->backend;
1958     struct amdgpu_device *adev = hwmgr->adev;
1959 
1960     data->uvd_power_gated = bgate;
1961 
1962     if (bgate) {
1963         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1964                         AMD_IP_BLOCK_TYPE_UVD,
1965                         AMD_PG_STATE_GATE);
1966         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1967                         AMD_IP_BLOCK_TYPE_UVD,
1968                         AMD_CG_STATE_GATE);
1969         smu8_dpm_update_uvd_dpm(hwmgr, true);
1970         smu8_dpm_powerdown_uvd(hwmgr);
1971     } else {
1972         smu8_dpm_powerup_uvd(hwmgr);
1973         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1974                         AMD_IP_BLOCK_TYPE_UVD,
1975                         AMD_CG_STATE_UNGATE);
1976         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1977                         AMD_IP_BLOCK_TYPE_UVD,
1978                         AMD_PG_STATE_UNGATE);
1979         smu8_dpm_update_uvd_dpm(hwmgr, false);
1980     }
1981 
1982     /* enable/disable Low Memory PState for UVD (4k videos) */
1983     if (adev->asic_type == CHIP_STONEY &&
1984         adev->uvd.decode_image_width >= WIDTH_4K)
1985         smu8_nbdpm_pstate_enable_disable(hwmgr,
1986                          bgate,
1987                          true);
1988 }
1989 
1990 static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1991 {
1992     struct smu8_hwmgr *data = hwmgr->backend;
1993 
1994     if (bgate) {
1995         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1996                     AMD_IP_BLOCK_TYPE_VCE,
1997                     AMD_PG_STATE_GATE);
1998         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1999                     AMD_IP_BLOCK_TYPE_VCE,
2000                     AMD_CG_STATE_GATE);
2001         smu8_enable_disable_vce_dpm(hwmgr, false);
2002         smu8_dpm_powerdown_vce(hwmgr);
2003         data->vce_power_gated = true;
2004     } else {
2005         smu8_dpm_powerup_vce(hwmgr);
2006         data->vce_power_gated = false;
2007         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
2008                     AMD_IP_BLOCK_TYPE_VCE,
2009                     AMD_CG_STATE_UNGATE);
2010         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
2011                     AMD_IP_BLOCK_TYPE_VCE,
2012                     AMD_PG_STATE_UNGATE);
2013         smu8_dpm_update_vce_dpm(hwmgr);
2014         smu8_enable_disable_vce_dpm(hwmgr, true);
2015     }
2016 }
2017 
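/*
 * Dispatch table wiring the smu8 implementations (Carrizo/Stoney-class
 * APUs) into the generic hwmgr interface.
 */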
2018 static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
2019     .backend_init = smu8_hwmgr_backend_init,
2020     .backend_fini = smu8_hwmgr_backend_fini,
2021     .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
2022     .force_dpm_level = smu8_dpm_force_dpm_level,
2023     .get_power_state_size = smu8_get_power_state_size,
2024     .powerdown_uvd = smu8_dpm_powerdown_uvd,
2025     .powergate_uvd = smu8_dpm_powergate_uvd,
2026     .powergate_vce = smu8_dpm_powergate_vce,
2027     .powergate_acp = smu8_dpm_powergate_acp,
2028     .get_mclk = smu8_dpm_get_mclk,
2029     .get_sclk = smu8_dpm_get_sclk,
2030     .patch_boot_state = smu8_dpm_patch_boot_state,
2031     .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
2032     .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
2033     .set_cpu_power_state = smu8_set_cpu_power_state,
2034     .store_cc6_data = smu8_store_cc6_data,
2035     .force_clock_level = smu8_force_clock_level,
2036     .print_clock_levels = smu8_print_clock_levels,
2037     .get_dal_power_level = smu8_get_dal_power_level,
2038     .get_performance_level = smu8_get_performance_level,
2039     .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
2040     .get_clock_by_type = smu8_get_clock_by_type,
2041     .get_max_high_clocks = smu8_get_max_high_clocks,
2042     .read_sensor = smu8_read_sensor,
2043     .power_off_asic = smu8_power_off_asic,
2044     .asic_setup = smu8_setup_asic_task,
2045     .dynamic_state_management_enable = smu8_enable_dpm_tasks,
2046     .power_state_set = smu8_set_power_state_tasks,
2047     .dynamic_state_management_disable = smu8_disable_dpm_tasks,
2048     .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
2049     .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
2050 };
2051 
2052 int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
2053 {
2054     hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
2055     hwmgr->pptable_func = &pptable_funcs;
2056     return 0;
2057 }