0001 /*
0002  * Copyright 2015 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 #include "pp_debug.h"
0024 #include <linux/types.h>
0025 #include <linux/kernel.h>
0026 #include <linux/gfp.h>
0027 #include <linux/slab.h>
0028 #include <linux/firmware.h>
0029 #include "amd_shared.h"
0030 #include "amd_powerplay.h"
0031 #include "power_state.h"
0032 #include "amdgpu.h"
0033 #include "hwmgr.h"
0034 #include "amdgpu_dpm_internal.h"
0035 #include "amdgpu_display.h"
0036 
0037 static const struct amd_pm_funcs pp_dpm_funcs;
0038 
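     /* Allocate the pp_hwmgr context and register it and pp_dpm_funcs with the amdgpu device. */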
0039 static int amd_powerplay_create(struct amdgpu_device *adev)
0040 {
0041     struct pp_hwmgr *hwmgr;
0042 
0043     if (adev == NULL)
0044         return -EINVAL;
0045 
0046     hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
0047     if (hwmgr == NULL)
0048         return -ENOMEM;
0049 
0050     hwmgr->adev = adev;
0051     hwmgr->not_vf = !amdgpu_sriov_vf(adev);
0052     hwmgr->device = amdgpu_cgs_create_device(adev);
0053     mutex_init(&hwmgr->msg_lock);
0054     hwmgr->chip_family = adev->family;
0055     hwmgr->chip_id = adev->asic_type;
0056     hwmgr->feature_mask = adev->pm.pp_feature;
0057     hwmgr->display_config = &adev->pm.pm_display_cfg;
0058     adev->powerplay.pp_handle = hwmgr;
0059     adev->powerplay.pp_funcs = &pp_dpm_funcs;
0060     return 0;
0061 }
0062 
0063 
0064 static void amd_powerplay_destroy(struct amdgpu_device *adev)
0065 {
0066     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0067 
0068     mutex_destroy(&hwmgr->msg_lock);
0069 
0070     kfree(hwmgr->hardcode_pp_table);
0071     hwmgr->hardcode_pp_table = NULL;
0072 
0073     kfree(hwmgr);
0074     hwmgr = NULL;
0075 }
0076 
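     /* IP block early init: create the PowerPlay context and run early hwmgr initialization. */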
0077 static int pp_early_init(void *handle)
0078 {
0079     int ret;
0080     struct amdgpu_device *adev = handle;
0081 
0082     ret = amd_powerplay_create(adev);
0083 
0084     if (ret != 0)
0085         return ret;
0086 
0087     ret = hwmgr_early_init(adev->powerplay.pp_handle);
0088     if (ret)
0089         return -EINVAL;
0090 
0091     return 0;
0092 }
0093 
0094 static int pp_sw_init(void *handle)
0095 {
0096     struct amdgpu_device *adev = handle;
0097     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0098     int ret = 0;
0099 
0100     ret = hwmgr_sw_init(hwmgr);
0101 
0102     pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");
0103 
0104     return ret;
0105 }
0106 
0107 static int pp_sw_fini(void *handle)
0108 {
0109     struct amdgpu_device *adev = handle;
0110     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0111 
0112     hwmgr_sw_fini(hwmgr);
0113 
0114     release_firmware(adev->pm.fw);
0115     adev->pm.fw = NULL;
0116 
0117     return 0;
0118 }
0119 
0120 static int pp_hw_init(void *handle)
0121 {
0122     int ret = 0;
0123     struct amdgpu_device *adev = handle;
0124     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0125 
0126     ret = hwmgr_hw_init(hwmgr);
0127 
0128     if (ret)
0129         pr_err("powerplay hw init failed\n");
0130 
0131     return ret;
0132 }
0133 
0134 static int pp_hw_fini(void *handle)
0135 {
0136     struct amdgpu_device *adev = handle;
0137     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0138 
0139     hwmgr_hw_fini(hwmgr);
0140 
0141     return 0;
0142 }
0143 
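     /* Reserve the SMU private buffer in GTT and pass its CPU/GPU addresses to the
      * SMU via notify_cac_buffer_info(); free the buffer if the SMU cannot be notified.
      */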
0144 static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
0145 {
0146     int r = -EINVAL;
0147     void *cpu_ptr = NULL;
0148     uint64_t gpu_addr;
0149     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0150 
0151     if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
0152                         PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
0153                         &adev->pm.smu_prv_buffer,
0154                         &gpu_addr,
0155                         &cpu_ptr)) {
0156         DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
0157         return;
0158     }
0159 
0160     if (hwmgr->hwmgr_func->notify_cac_buffer_info)
0161         r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
0162                     lower_32_bits((unsigned long)cpu_ptr),
0163                     upper_32_bits((unsigned long)cpu_ptr),
0164                     lower_32_bits(gpu_addr),
0165                     upper_32_bits(gpu_addr),
0166                     adev->pm.smu_prv_buffer_size);
0167 
0168     if (r) {
0169         amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
0170         adev->pm.smu_prv_buffer = NULL;
0171         DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
0172     }
0173 }
0174 
0175 static int pp_late_init(void *handle)
0176 {
0177     struct amdgpu_device *adev = handle;
0178     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0179 
0180     if (hwmgr && hwmgr->pm_en)
0181         hwmgr_handle_task(hwmgr,
0182                     AMD_PP_TASK_COMPLETE_INIT, NULL);
0183     if (adev->pm.smu_prv_buffer_size != 0)
0184         pp_reserve_vram_for_smu(adev);
0185 
0186     return 0;
0187 }
0188 
0189 static void pp_late_fini(void *handle)
0190 {
0191     struct amdgpu_device *adev = handle;
0192 
0193     if (adev->pm.smu_prv_buffer)
0194         amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
0195     amd_powerplay_destroy(adev);
0196 }
0197 
0198 
0199 static bool pp_is_idle(void *handle)
0200 {
0201     return false;
0202 }
0203 
0204 static int pp_wait_for_idle(void *handle)
0205 {
0206     return 0;
0207 }
0208 
0209 static int pp_sw_reset(void *handle)
0210 {
0211     return 0;
0212 }
0213 
0214 static int pp_set_powergating_state(void *handle,
0215                     enum amd_powergating_state state)
0216 {
0217     return 0;
0218 }
0219 
0220 static int pp_suspend(void *handle)
0221 {
0222     struct amdgpu_device *adev = handle;
0223     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0224 
0225     return hwmgr_suspend(hwmgr);
0226 }
0227 
0228 static int pp_resume(void *handle)
0229 {
0230     struct amdgpu_device *adev = handle;
0231     struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
0232 
0233     return hwmgr_resume(hwmgr);
0234 }
0235 
0236 static int pp_set_clockgating_state(void *handle,
0237                       enum amd_clockgating_state state)
0238 {
0239     return 0;
0240 }
0241 
0242 static const struct amd_ip_funcs pp_ip_funcs = {
0243     .name = "powerplay",
0244     .early_init = pp_early_init,
0245     .late_init = pp_late_init,
0246     .sw_init = pp_sw_init,
0247     .sw_fini = pp_sw_fini,
0248     .hw_init = pp_hw_init,
0249     .hw_fini = pp_hw_fini,
0250     .late_fini = pp_late_fini,
0251     .suspend = pp_suspend,
0252     .resume = pp_resume,
0253     .is_idle = pp_is_idle,
0254     .wait_for_idle = pp_wait_for_idle,
0255     .soft_reset = pp_sw_reset,
0256     .set_clockgating_state = pp_set_clockgating_state,
0257     .set_powergating_state = pp_set_powergating_state,
0258 };
0259 
0260 const struct amdgpu_ip_block_version pp_smu_ip_block =
0261 {
0262     .type = AMD_IP_BLOCK_TYPE_SMC,
0263     .major = 1,
0264     .minor = 0,
0265     .rev = 0,
0266     .funcs = &pp_ip_funcs,
0267 };
0268 
0269 /* This interface is only supported on VI (Volcanic Islands),
0270  * because only smu7/8 can help to load the gfx/sdma firmware;
0271  * the SMU needs to be enabled before the other IPs' firmware is loaded,
0272  * so call start_smu to load the SMU7 firmware and then the other IPs' firmware.
0273  */
0274 static int pp_dpm_load_fw(void *handle)
0275 {
0276     struct pp_hwmgr *hwmgr = handle;
0277 
0278     if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
0279         return -EINVAL;
0280 
0281     if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
0282         pr_err("fw load failed\n");
0283         return -EINVAL;
0284     }
0285 
0286     return 0;
0287 }
0288 
0289 static int pp_dpm_fw_loading_complete(void *handle)
0290 {
0291     return 0;
0292 }
0293 
0294 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
0295 {
0296     struct pp_hwmgr *hwmgr = handle;
0297 
0298     if (!hwmgr || !hwmgr->pm_en)
0299         return -EINVAL;
0300 
0301     if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
0302         pr_info_ratelimited("%s was not implemented.\n", __func__);
0303         return 0;
0304     }
0305 
0306     return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
0307 }
0308 
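     /* Track UMD pstate (profiling mode) entry and exit, saving the previous
      * DPM level on entry and restoring it on PROFILE_EXIT.
      */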
0309 static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
0310                         enum amd_dpm_forced_level *level)
0311 {
0312     uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
0313                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
0314                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
0315                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
0316 
0317     if (!(hwmgr->dpm_level & profile_mode_mask)) {
0318         /* enter umd pstate, save current level, disable gfx cg*/
0319         if (*level & profile_mode_mask) {
0320             hwmgr->saved_dpm_level = hwmgr->dpm_level;
0321             hwmgr->en_umd_pstate = true;
0322         }
0323     } else {
0324         /* exit umd pstate, restore level, enable gfx cg*/
0325         if (!(*level & profile_mode_mask)) {
0326             if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
0327                 *level = hwmgr->saved_dpm_level;
0328             hwmgr->en_umd_pstate = false;
0329         }
0330     }
0331 }
0332 
0333 static int pp_dpm_force_performance_level(void *handle,
0334                     enum amd_dpm_forced_level level)
0335 {
0336     struct pp_hwmgr *hwmgr = handle;
0337 
0338     if (!hwmgr || !hwmgr->pm_en)
0339         return -EINVAL;
0340 
0341     if (level == hwmgr->dpm_level)
0342         return 0;
0343 
0344     pp_dpm_en_umd_pstate(hwmgr, &level);
0345     hwmgr->request_dpm_level = level;
0346     hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
0347 
0348     return 0;
0349 }
0350 
0351 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
0352                                 void *handle)
0353 {
0354     struct pp_hwmgr *hwmgr = handle;
0355 
0356     if (!hwmgr || !hwmgr->pm_en)
0357         return -EINVAL;
0358 
0359     return hwmgr->dpm_level;
0360 }
0361 
0362 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
0363 {
0364     struct pp_hwmgr *hwmgr = handle;
0365 
0366     if (!hwmgr || !hwmgr->pm_en)
0367         return 0;
0368 
0369     if (hwmgr->hwmgr_func->get_sclk == NULL) {
0370         pr_info_ratelimited("%s was not implemented.\n", __func__);
0371         return 0;
0372     }
0373     return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
0374 }
0375 
0376 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
0377 {
0378     struct pp_hwmgr *hwmgr = handle;
0379 
0380     if (!hwmgr || !hwmgr->pm_en)
0381         return 0;
0382 
0383     if (hwmgr->hwmgr_func->get_mclk == NULL) {
0384         pr_info_ratelimited("%s was not implemented.\n", __func__);
0385         return 0;
0386     }
0387     return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
0388 }
0389 
0390 static void pp_dpm_powergate_vce(void *handle, bool gate)
0391 {
0392     struct pp_hwmgr *hwmgr = handle;
0393 
0394     if (!hwmgr || !hwmgr->pm_en)
0395         return;
0396 
0397     if (hwmgr->hwmgr_func->powergate_vce == NULL) {
0398         pr_info_ratelimited("%s was not implemented.\n", __func__);
0399         return;
0400     }
0401     hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
0402 }
0403 
0404 static void pp_dpm_powergate_uvd(void *handle, bool gate)
0405 {
0406     struct pp_hwmgr *hwmgr = handle;
0407 
0408     if (!hwmgr || !hwmgr->pm_en)
0409         return;
0410 
0411     if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
0412         pr_info_ratelimited("%s was not implemented.\n", __func__);
0413         return;
0414     }
0415     hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
0416 }
0417 
0418 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
0419         enum amd_pm_state_type *user_state)
0420 {
0421     struct pp_hwmgr *hwmgr = handle;
0422 
0423     if (!hwmgr || !hwmgr->pm_en)
0424         return -EINVAL;
0425 
0426     return hwmgr_handle_task(hwmgr, task_id, user_state);
0427 }
0428 
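     /* Translate the UI label of the current power state into a generic amd_pm_state_type. */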
0429 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
0430 {
0431     struct pp_hwmgr *hwmgr = handle;
0432     struct pp_power_state *state;
0433     enum amd_pm_state_type pm_type;
0434 
0435     if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
0436         return -EINVAL;
0437 
0438     state = hwmgr->current_ps;
0439 
0440     switch (state->classification.ui_label) {
0441     case PP_StateUILabel_Battery:
0442         pm_type = POWER_STATE_TYPE_BATTERY;
0443         break;
0444     case PP_StateUILabel_Balanced:
0445         pm_type = POWER_STATE_TYPE_BALANCED;
0446         break;
0447     case PP_StateUILabel_Performance:
0448         pm_type = POWER_STATE_TYPE_PERFORMANCE;
0449         break;
0450     default:
0451         if (state->classification.flags & PP_StateClassificationFlag_Boot)
0452             pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
0453         else
0454             pm_type = POWER_STATE_TYPE_DEFAULT;
0455         break;
0456     }
0457 
0458     return pm_type;
0459 }
0460 
0461 static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
0462 {
0463     struct pp_hwmgr *hwmgr = handle;
0464 
0465     if (!hwmgr || !hwmgr->pm_en)
0466         return -EOPNOTSUPP;
0467 
0468     if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
0469         return -EOPNOTSUPP;
0470 
0471     if (mode == U32_MAX)
0472         return -EINVAL;
0473 
0474     hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
0475 
0476     return 0;
0477 }
0478 
0479 static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
0480 {
0481     struct pp_hwmgr *hwmgr = handle;
0482 
0483     if (!hwmgr || !hwmgr->pm_en)
0484         return -EOPNOTSUPP;
0485 
0486     if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
0487         return -EOPNOTSUPP;
0488 
0489     if (!fan_mode)
0490         return -EINVAL;
0491 
0492     *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
0493     return 0;
0494 }
0495 
0496 static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
0497 {
0498     struct pp_hwmgr *hwmgr = handle;
0499 
0500     if (!hwmgr || !hwmgr->pm_en)
0501         return -EOPNOTSUPP;
0502 
0503     if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
0504         return -EOPNOTSUPP;
0505 
0506     if (speed == U32_MAX)
0507         return -EINVAL;
0508 
0509     return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
0510 }
0511 
0512 static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
0513 {
0514     struct pp_hwmgr *hwmgr = handle;
0515 
0516     if (!hwmgr || !hwmgr->pm_en)
0517         return -EOPNOTSUPP;
0518 
0519     if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
0520         return -EOPNOTSUPP;
0521 
0522     if (!speed)
0523         return -EINVAL;
0524 
0525     return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
0526 }
0527 
0528 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
0529 {
0530     struct pp_hwmgr *hwmgr = handle;
0531 
0532     if (!hwmgr || !hwmgr->pm_en)
0533         return -EOPNOTSUPP;
0534 
0535     if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
0536         return -EOPNOTSUPP;
0537 
0538     if (!rpm)
0539         return -EINVAL;
0540 
0541     return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
0542 }
0543 
0544 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
0545 {
0546     struct pp_hwmgr *hwmgr = handle;
0547 
0548     if (!hwmgr || !hwmgr->pm_en)
0549         return -EOPNOTSUPP;
0550 
0551     if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
0552         return -EOPNOTSUPP;
0553 
0554     if (rpm == U32_MAX)
0555         return -EINVAL;
0556 
0557     return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
0558 }
0559 
0560 static int pp_dpm_get_pp_num_states(void *handle,
0561         struct pp_states_info *data)
0562 {
0563     struct pp_hwmgr *hwmgr = handle;
0564     int i;
0565 
0566     memset(data, 0, sizeof(*data));
0567 
0568     if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
0569         return -EINVAL;
0570 
0571     data->nums = hwmgr->num_ps;
0572 
0573     for (i = 0; i < hwmgr->num_ps; i++) {
0574         struct pp_power_state *state = (struct pp_power_state *)
0575                 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
0576         switch (state->classification.ui_label) {
0577         case PP_StateUILabel_Battery:
0578             data->states[i] = POWER_STATE_TYPE_BATTERY;
0579             break;
0580         case PP_StateUILabel_Balanced:
0581             data->states[i] = POWER_STATE_TYPE_BALANCED;
0582             break;
0583         case PP_StateUILabel_Performance:
0584             data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
0585             break;
0586         default:
0587             if (state->classification.flags & PP_StateClassificationFlag_Boot)
0588                 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
0589             else
0590                 data->states[i] = POWER_STATE_TYPE_DEFAULT;
0591         }
0592     }
0593     return 0;
0594 }
0595 
0596 static int pp_dpm_get_pp_table(void *handle, char **table)
0597 {
0598     struct pp_hwmgr *hwmgr = handle;
0599 
0600     if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
0601         return -EINVAL;
0602 
0603     *table = (char *)hwmgr->soft_pp_table;
0604     return hwmgr->soft_pp_table_size;
0605 }
0606 
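     /* Re-initialize the hardware manager, e.g. after the soft pp table has been replaced. */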
0607 static int amd_powerplay_reset(void *handle)
0608 {
0609     struct pp_hwmgr *hwmgr = handle;
0610     int ret;
0611 
0612     ret = hwmgr_hw_fini(hwmgr);
0613     if (ret)
0614         return ret;
0615 
0616     ret = hwmgr_hw_init(hwmgr);
0617     if (ret)
0618         return ret;
0619 
0620     return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
0621 }
0622 
0623 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
0624 {
0625     struct pp_hwmgr *hwmgr = handle;
0626     int ret = -ENOMEM;
0627 
0628     if (!hwmgr || !hwmgr->pm_en)
0629         return -EINVAL;
0630 
0631     if (!hwmgr->hardcode_pp_table) {
0632         hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
0633                            hwmgr->soft_pp_table_size,
0634                            GFP_KERNEL);
0635         if (!hwmgr->hardcode_pp_table)
0636             return ret;
0637     }
0638 
0639     memcpy(hwmgr->hardcode_pp_table, buf, size);
0640 
0641     hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
0642 
0643     ret = amd_powerplay_reset(handle);
0644     if (ret)
0645         return ret;
0646 
0647     if (hwmgr->hwmgr_func->avfs_control)
0648         ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
0649 
0650     return ret;
0651 }
0652 
0653 static int pp_dpm_force_clock_level(void *handle,
0654         enum pp_clock_type type, uint32_t mask)
0655 {
0656     struct pp_hwmgr *hwmgr = handle;
0657 
0658     if (!hwmgr || !hwmgr->pm_en)
0659         return -EINVAL;
0660 
0661     if (hwmgr->hwmgr_func->force_clock_level == NULL) {
0662         pr_info_ratelimited("%s was not implemented.\n", __func__);
0663         return 0;
0664     }
0665 
0666     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
0667         pr_debug("force clock level is for dpm manual mode only.\n");
0668         return -EINVAL;
0669     }
0670 
0671     return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
0672 }
0673 
0674 static int pp_dpm_emit_clock_levels(void *handle,
0675                     enum pp_clock_type type,
0676                     char *buf,
0677                     int *offset)
0678 {
0679     struct pp_hwmgr *hwmgr = handle;
0680 
0681     if (!hwmgr || !hwmgr->pm_en)
0682         return -EOPNOTSUPP;
0683 
0684     if (!hwmgr->hwmgr_func->emit_clock_levels)
0685         return -ENOENT;
0686 
0687     return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
0688 }
0689 
0690 static int pp_dpm_print_clock_levels(void *handle,
0691         enum pp_clock_type type, char *buf)
0692 {
0693     struct pp_hwmgr *hwmgr = handle;
0694 
0695     if (!hwmgr || !hwmgr->pm_en)
0696         return -EINVAL;
0697 
0698     if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
0699         pr_info_ratelimited("%s was not implemented.\n", __func__);
0700         return 0;
0701     }
0702     return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
0703 }
0704 
0705 static int pp_dpm_get_sclk_od(void *handle)
0706 {
0707     struct pp_hwmgr *hwmgr = handle;
0708 
0709     if (!hwmgr || !hwmgr->pm_en)
0710         return -EINVAL;
0711 
0712     if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
0713         pr_info_ratelimited("%s was not implemented.\n", __func__);
0714         return 0;
0715     }
0716     return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
0717 }
0718 
0719 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
0720 {
0721     struct pp_hwmgr *hwmgr = handle;
0722 
0723     if (!hwmgr || !hwmgr->pm_en)
0724         return -EINVAL;
0725 
0726     if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
0727         pr_info_ratelimited("%s was not implemented.\n", __func__);
0728         return 0;
0729     }
0730 
0731     return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
0732 }
0733 
0734 static int pp_dpm_get_mclk_od(void *handle)
0735 {
0736     struct pp_hwmgr *hwmgr = handle;
0737 
0738     if (!hwmgr || !hwmgr->pm_en)
0739         return -EINVAL;
0740 
0741     if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
0742         pr_info_ratelimited("%s was not implemented.\n", __func__);
0743         return 0;
0744     }
0745     return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
0746 }
0747 
0748 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
0749 {
0750     struct pp_hwmgr *hwmgr = handle;
0751 
0752     if (!hwmgr || !hwmgr->pm_en)
0753         return -EINVAL;
0754 
0755     if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
0756         pr_info_ratelimited("%s was not implemented.\n", __func__);
0757         return 0;
0758     }
0759     return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
0760 }
0761 
0762 static int pp_dpm_read_sensor(void *handle, int idx,
0763                   void *value, int *size)
0764 {
0765     struct pp_hwmgr *hwmgr = handle;
0766 
0767     if (!hwmgr || !hwmgr->pm_en || !value)
0768         return -EINVAL;
0769 
0770     switch (idx) {
0771     case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
0772         *((uint32_t *)value) = hwmgr->pstate_sclk;
0773         return 0;
0774     case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
0775         *((uint32_t *)value) = hwmgr->pstate_mclk;
0776         return 0;
0777     case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
0778         *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
0779         return 0;
0780     case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
0781         *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
0782         return 0;
0783     default:
0784         return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
0785     }
0786 }
0787 
0788 static struct amd_vce_state*
0789 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
0790 {
0791     struct pp_hwmgr *hwmgr = handle;
0792 
0793     if (!hwmgr || !hwmgr->pm_en)
0794         return NULL;
0795 
0796     if (idx < hwmgr->num_vce_state_tables)
0797         return &hwmgr->vce_states[idx];
0798     return NULL;
0799 }
0800 
0801 static int pp_get_power_profile_mode(void *handle, char *buf)
0802 {
0803     struct pp_hwmgr *hwmgr = handle;
0804 
0805     if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
0806         return -EOPNOTSUPP;
0807     if (!buf)
0808         return -EINVAL;
0809 
0810     return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
0811 }
0812 
0813 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
0814 {
0815     struct pp_hwmgr *hwmgr = handle;
0816 
0817     if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
0818         return -EOPNOTSUPP;
0819 
0820     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
0821         pr_debug("power profile setting is for manual dpm mode only.\n");
0822         return -EINVAL;
0823     }
0824 
0825     return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
0826 }
0827 
0828 static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
0829 {
0830     struct pp_hwmgr *hwmgr = handle;
0831 
0832     if (!hwmgr || !hwmgr->pm_en)
0833         return -EINVAL;
0834 
0835     if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
0836         return 0;
0837 
0838     return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
0839 }
0840 
0841 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
0842 {
0843     struct pp_hwmgr *hwmgr = handle;
0844 
0845     if (!hwmgr || !hwmgr->pm_en)
0846         return -EINVAL;
0847 
0848     if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
0849         pr_info_ratelimited("%s was not implemented.\n", __func__);
0850         return 0;
0851     }
0852 
0853     return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
0854 }
0855 
0856 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
0857 {
0858     struct pp_hwmgr *hwmgr = handle;
0859 
0860     if (!hwmgr)
0861         return -EINVAL;
0862 
0863     if (!hwmgr->pm_en)
0864         return 0;
0865 
0866     if (hwmgr->hwmgr_func->set_mp1_state)
0867         return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
0868 
0869     return 0;
0870 }
0871 
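     /* Enable or disable a workload power profile: update the workload mask and
      * apply the highest-priority profile that remains selected.
      */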
0872 static int pp_dpm_switch_power_profile(void *handle,
0873         enum PP_SMC_POWER_PROFILE type, bool en)
0874 {
0875     struct pp_hwmgr *hwmgr = handle;
0876     long workload;
0877     uint32_t index;
0878 
0879     if (!hwmgr || !hwmgr->pm_en)
0880         return -EINVAL;
0881 
0882     if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
0883         pr_info_ratelimited("%s was not implemented.\n", __func__);
0884         return -EINVAL;
0885     }
0886 
0887     if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
0888         return -EINVAL;
0889 
0890     if (!en) {
0891         hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
0892         index = fls(hwmgr->workload_mask);
0893         index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
0894         workload = hwmgr->workload_setting[index];
0895     } else {
0896         hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
0897         index = fls(hwmgr->workload_mask);
0898         index = index <= Workload_Policy_Max ? index - 1 : 0;
0899         workload = hwmgr->workload_setting[index];
0900     }
0901 
0902     if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
0903         hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
0904             if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
0905                 return -EINVAL;
0906     }
0907 
0908     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
0909         hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
0910 
0911     return 0;
0912 }
0913 
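     /* Apply a new power limit, clamped to the default limit (scaled by the
      * OD TDP percentage when overdrive is enabled).
      */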
0914 static int pp_set_power_limit(void *handle, uint32_t limit)
0915 {
0916     struct pp_hwmgr *hwmgr = handle;
0917     uint32_t max_power_limit;
0918 
0919     if (!hwmgr || !hwmgr->pm_en)
0920         return -EINVAL;
0921 
0922     if (hwmgr->hwmgr_func->set_power_limit == NULL) {
0923         pr_info_ratelimited("%s was not implemented.\n", __func__);
0924         return -EINVAL;
0925     }
0926 
0927     if (limit == 0)
0928         limit = hwmgr->default_power_limit;
0929 
0930     max_power_limit = hwmgr->default_power_limit;
0931     if (hwmgr->od_enabled) {
0932         max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
0933         max_power_limit /= 100;
0934     }
0935 
0936     if (limit > max_power_limit)
0937         return -EINVAL;
0938 
0939     hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
0940     hwmgr->power_limit = limit;
0941     return 0;
0942 }
0943 
0944 static int pp_get_power_limit(void *handle, uint32_t *limit,
0945                   enum pp_power_limit_level pp_limit_level,
0946                   enum pp_power_type power_type)
0947 {
0948     struct pp_hwmgr *hwmgr = handle;
0949     int ret = 0;
0950 
0951     if (!hwmgr || !hwmgr->pm_en || !limit)
0952         return -EINVAL;
0953 
0954     if (power_type != PP_PWR_TYPE_SUSTAINED)
0955         return -EOPNOTSUPP;
0956 
0957     switch (pp_limit_level) {
0958     case PP_PWR_LIMIT_CURRENT:
0959         *limit = hwmgr->power_limit;
0960         break;
0961     case PP_PWR_LIMIT_DEFAULT:
0962         *limit = hwmgr->default_power_limit;
0963         break;
0964     case PP_PWR_LIMIT_MAX:
0965         *limit = hwmgr->default_power_limit;
0966         if (hwmgr->od_enabled) {
0967             *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
0968             *limit /= 100;
0969         }
0970         break;
0971     default:
0972         ret = -EOPNOTSUPP;
0973         break;
0974     }
0975 
0976     return ret;
0977 }
0978 
0979 static int pp_display_configuration_change(void *handle,
0980     const struct amd_pp_display_configuration *display_config)
0981 {
0982     struct pp_hwmgr *hwmgr = handle;
0983 
0984     if (!hwmgr || !hwmgr->pm_en)
0985         return -EINVAL;
0986 
0987     phm_store_dal_configuration_data(hwmgr, display_config);
0988     return 0;
0989 }
0990 
0991 static int pp_get_display_power_level(void *handle,
0992         struct amd_pp_simple_clock_info *output)
0993 {
0994     struct pp_hwmgr *hwmgr = handle;
0995 
0996     if (!hwmgr || !hwmgr->pm_en || !output)
0997         return -EINVAL;
0998 
0999     return phm_get_dal_power_level(hwmgr, output);
1000 }
1001 
1002 static int pp_get_current_clocks(void *handle,
1003         struct amd_pp_clock_info *clocks)
1004 {
1005     struct amd_pp_simple_clock_info simple_clocks = { 0 };
1006     struct pp_clock_info hw_clocks;
1007     struct pp_hwmgr *hwmgr = handle;
1008     int ret = 0;
1009 
1010     if (!hwmgr || !hwmgr->pm_en)
1011         return -EINVAL;
1012 
1013     phm_get_dal_power_level(hwmgr, &simple_clocks);
1014 
1015     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1016                     PHM_PlatformCaps_PowerContainment))
1017         ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1018                     &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1019     else
1020         ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1021                     &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1022 
1023     if (ret) {
1024         pr_debug("Error in phm_get_clock_info\n");
1025         return -EINVAL;
1026     }
1027 
1028     clocks->min_engine_clock = hw_clocks.min_eng_clk;
1029     clocks->max_engine_clock = hw_clocks.max_eng_clk;
1030     clocks->min_memory_clock = hw_clocks.min_mem_clk;
1031     clocks->max_memory_clock = hw_clocks.max_mem_clk;
1032     clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1033     clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1034 
1035     clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1036     clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1037 
1038     if (simple_clocks.level == 0)
1039         clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1040     else
1041         clocks->max_clocks_state = simple_clocks.level;
1042 
1043     if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1044         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1045         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1046     }
1047     return 0;
1048 }
1049 
1050 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1051 {
1052     struct pp_hwmgr *hwmgr = handle;
1053 
1054     if (!hwmgr || !hwmgr->pm_en)
1055         return -EINVAL;
1056 
1057     if (clocks == NULL)
1058         return -EINVAL;
1059 
1060     return phm_get_clock_by_type(hwmgr, type, clocks);
1061 }
1062 
1063 static int pp_get_clock_by_type_with_latency(void *handle,
1064         enum amd_pp_clock_type type,
1065         struct pp_clock_levels_with_latency *clocks)
1066 {
1067     struct pp_hwmgr *hwmgr = handle;
1068 
1069     if (!hwmgr || !hwmgr->pm_en || !clocks)
1070         return -EINVAL;
1071 
1072     return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1073 }
1074 
1075 static int pp_get_clock_by_type_with_voltage(void *handle,
1076         enum amd_pp_clock_type type,
1077         struct pp_clock_levels_with_voltage *clocks)
1078 {
1079     struct pp_hwmgr *hwmgr = handle;
1080 
1081     if (!hwmgr || !hwmgr->pm_en || !clocks)
1082         return -EINVAL;
1083 
1084     return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1085 }
1086 
1087 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1088         void *clock_ranges)
1089 {
1090     struct pp_hwmgr *hwmgr = handle;
1091 
1092     if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1093         return -EINVAL;
1094 
1095     return phm_set_watermarks_for_clocks_ranges(hwmgr,
1096                             clock_ranges);
1097 }
1098 
1099 static int pp_display_clock_voltage_request(void *handle,
1100         struct pp_display_clock_request *clock)
1101 {
1102     struct pp_hwmgr *hwmgr = handle;
1103 
1104     if (!hwmgr || !hwmgr->pm_en || !clock)
1105         return -EINVAL;
1106 
1107     return phm_display_clock_voltage_request(hwmgr, clock);
1108 }
1109 
1110 static int pp_get_display_mode_validation_clocks(void *handle,
1111         struct amd_pp_simple_clock_info *clocks)
1112 {
1113     struct pp_hwmgr *hwmgr = handle;
1114     int ret = 0;
1115 
1116     if (!hwmgr || !hwmgr->pm_en || !clocks)
1117         return -EINVAL;
1118 
1119     clocks->level = PP_DAL_POWERLEVEL_7;
1120 
1121     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1122         ret = phm_get_max_high_clocks(hwmgr, clocks);
1123 
1124     return ret;
1125 }
1126 
1127 static int pp_dpm_powergate_mmhub(void *handle)
1128 {
1129     struct pp_hwmgr *hwmgr = handle;
1130 
1131     if (!hwmgr || !hwmgr->pm_en)
1132         return -EINVAL;
1133 
1134     if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1135         pr_info_ratelimited("%s was not implemented.\n", __func__);
1136         return 0;
1137     }
1138 
1139     return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1140 }
1141 
1142 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1143 {
1144     struct pp_hwmgr *hwmgr = handle;
1145 
1146     if (!hwmgr || !hwmgr->pm_en)
1147         return 0;
1148 
1149     if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1150         pr_info_ratelimited("%s was not implemented.\n", __func__);
1151         return 0;
1152     }
1153 
1154     return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1155 }
1156 
1157 static void pp_dpm_powergate_acp(void *handle, bool gate)
1158 {
1159     struct pp_hwmgr *hwmgr = handle;
1160 
1161     if (!hwmgr || !hwmgr->pm_en)
1162         return;
1163 
1164     if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1165         pr_info_ratelimited("%s was not implemented.\n", __func__);
1166         return;
1167     }
1168 
1169     hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1170 }
1171 
1172 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1173 {
1174     struct pp_hwmgr *hwmgr = handle;
1175 
1176     if (!hwmgr)
1177         return;
1178 
1179     if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1180         pr_info_ratelimited("%s was not implemented.\n", __func__);
1181         return;
1182     }
1183 
1184     hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1185 }
1186 
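     /* Route per-IP-block powergating requests to the matching PowerPlay handler. */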
1187 static int pp_set_powergating_by_smu(void *handle,
1188                 uint32_t block_type, bool gate)
1189 {
1190     int ret = 0;
1191 
1192     switch (block_type) {
1193     case AMD_IP_BLOCK_TYPE_UVD:
1194     case AMD_IP_BLOCK_TYPE_VCN:
1195         pp_dpm_powergate_uvd(handle, gate);
1196         break;
1197     case AMD_IP_BLOCK_TYPE_VCE:
1198         pp_dpm_powergate_vce(handle, gate);
1199         break;
1200     case AMD_IP_BLOCK_TYPE_GMC:
1201         /*
1202          * For now, this is only used on PICASSO.
1203          * And only "gate" operation is supported.
1204          */
1205         if (gate)
1206             pp_dpm_powergate_mmhub(handle);
1207         break;
1208     case AMD_IP_BLOCK_TYPE_GFX:
1209         ret = pp_dpm_powergate_gfx(handle, gate);
1210         break;
1211     case AMD_IP_BLOCK_TYPE_ACP:
1212         pp_dpm_powergate_acp(handle, gate);
1213         break;
1214     case AMD_IP_BLOCK_TYPE_SDMA:
1215         pp_dpm_powergate_sdma(handle, gate);
1216         break;
1217     default:
1218         break;
1219     }
1220     return ret;
1221 }
1222 
1223 static int pp_notify_smu_enable_pwe(void *handle)
1224 {
1225     struct pp_hwmgr *hwmgr = handle;
1226 
1227     if (!hwmgr || !hwmgr->pm_en)
1228         return -EINVAL;
1229 
1230     if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1231         pr_info_ratelimited("%s was not implemented.\n", __func__);
1232         return -EINVAL;
1233     }
1234 
1235     hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1236 
1237     return 0;
1238 }
1239 
1240 static int pp_enable_mgpu_fan_boost(void *handle)
1241 {
1242     struct pp_hwmgr *hwmgr = handle;
1243 
1244     if (!hwmgr)
1245         return -EINVAL;
1246 
1247     if (!hwmgr->pm_en ||
1248          hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1249         return 0;
1250 
1251     hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1252 
1253     return 0;
1254 }
1255 
1256 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1257 {
1258     struct pp_hwmgr *hwmgr = handle;
1259 
1260     if (!hwmgr || !hwmgr->pm_en)
1261         return -EINVAL;
1262 
1263     if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1264         pr_debug("%s was not implemented.\n", __func__);
1265         return -EINVAL;
1266     }
1267 
1268     hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1269 
1270     return 0;
1271 }
1272 
1273 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1274 {
1275     struct pp_hwmgr *hwmgr = handle;
1276 
1277     if (!hwmgr || !hwmgr->pm_en)
1278         return -EINVAL;
1279 
1280     if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1281         pr_debug("%s was not implemented.\n", __func__);
1282         return -EINVAL;
1283     }
1284 
1285     hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1286 
1287     return 0;
1288 }
1289 
1290 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1291 {
1292     struct pp_hwmgr *hwmgr = handle;
1293 
1294     if (!hwmgr || !hwmgr->pm_en)
1295         return -EINVAL;
1296 
1297     if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1298         pr_debug("%s was not implemented.\n", __func__);
1299         return -EINVAL;
1300     }
1301 
1302     hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1303 
1304     return 0;
1305 }
1306 
1307 static int pp_set_active_display_count(void *handle, uint32_t count)
1308 {
1309     struct pp_hwmgr *hwmgr = handle;
1310 
1311     if (!hwmgr || !hwmgr->pm_en)
1312         return -EINVAL;
1313 
1314     return phm_set_active_display_count(hwmgr, count);
1315 }
1316 
1317 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1318 {
1319     struct pp_hwmgr *hwmgr = handle;
1320 
1321     *cap = false;
1322     if (!hwmgr)
1323         return -EINVAL;
1324 
1325     if (!(hwmgr->not_vf && amdgpu_dpm) ||
1326         !hwmgr->hwmgr_func->get_asic_baco_capability)
1327         return 0;
1328 
1329     hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1330 
1331     return 0;
1332 }
1333 
1334 static int pp_get_asic_baco_state(void *handle, int *state)
1335 {
1336     struct pp_hwmgr *hwmgr = handle;
1337 
1338     if (!hwmgr)
1339         return -EINVAL;
1340 
1341     if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1342         return 0;
1343 
1344     hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1345 
1346     return 0;
1347 }
1348 
1349 static int pp_set_asic_baco_state(void *handle, int state)
1350 {
1351     struct pp_hwmgr *hwmgr = handle;
1352 
1353     if (!hwmgr)
1354         return -EINVAL;
1355 
1356     if (!(hwmgr->not_vf && amdgpu_dpm) ||
1357         !hwmgr->hwmgr_func->set_asic_baco_state)
1358         return 0;
1359 
1360     hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1361 
1362     return 0;
1363 }
1364 
1365 static int pp_get_ppfeature_status(void *handle, char *buf)
1366 {
1367     struct pp_hwmgr *hwmgr = handle;
1368 
1369     if (!hwmgr || !hwmgr->pm_en || !buf)
1370         return -EINVAL;
1371 
1372     if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1373         pr_info_ratelimited("%s was not implemented.\n", __func__);
1374         return -EINVAL;
1375     }
1376 
1377     return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1378 }
1379 
1380 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1381 {
1382     struct pp_hwmgr *hwmgr = handle;
1383 
1384     if (!hwmgr || !hwmgr->pm_en)
1385         return -EINVAL;
1386 
1387     if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1388         pr_info_ratelimited("%s was not implemented.\n", __func__);
1389         return -EINVAL;
1390     }
1391 
1392     return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1393 }
1394 
1395 static int pp_asic_reset_mode_2(void *handle)
1396 {
1397     struct pp_hwmgr *hwmgr = handle;
1398 
1399     if (!hwmgr || !hwmgr->pm_en)
1400         return -EINVAL;
1401 
1402     if (hwmgr->hwmgr_func->asic_reset == NULL) {
1403         pr_info_ratelimited("%s was not implemented.\n", __func__);
1404         return -EINVAL;
1405     }
1406 
1407     return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1408 }
1409 
1410 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1411 {
1412     struct pp_hwmgr *hwmgr = handle;
1413 
1414     if (!hwmgr || !hwmgr->pm_en)
1415         return -EINVAL;
1416 
1417     if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1418         pr_info_ratelimited("%s was not implemented.\n", __func__);
1419         return -EINVAL;
1420     }
1421 
1422     return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1423 }
1424 
1425 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1426 {
1427     struct pp_hwmgr *hwmgr = handle;
1428 
1429     if (!hwmgr)
1430         return -EINVAL;
1431 
1432     if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1433         return 0;
1434 
1435     hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1436 
1437     return 0;
1438 }
1439 
1440 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1441 {
1442     struct pp_hwmgr *hwmgr = handle;
1443 
1444     if (!hwmgr)
1445         return -EINVAL;
1446 
1447     if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1448         return 0;
1449 
1450     hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1451 
1452     return 0;
1453 }
1454 
1455 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1456 {
1457     struct pp_hwmgr *hwmgr = handle;
1458 
1459     if (!hwmgr)
1460         return -EINVAL;
1461 
1462     if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1463         return -EOPNOTSUPP;
1464 
1465     return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1466 }
1467 
1468 static int pp_gfx_state_change_set(void *handle, uint32_t state)
1469 {
1470     struct pp_hwmgr *hwmgr = handle;
1471 
1472     if (!hwmgr || !hwmgr->pm_en)
1473         return -EINVAL;
1474 
1475     if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1476         pr_info_ratelimited("%s was not implemented.\n", __func__);
1477         return -EINVAL;
1478     }
1479 
1480     hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1481     return 0;
1482 }
1483 
1484 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1485 {
1486     struct pp_hwmgr *hwmgr = handle;
1487     struct amdgpu_device *adev = hwmgr->adev;
1488 
1489     if (!addr || !size)
1490         return -EINVAL;
1491 
1492     *addr = NULL;
1493     *size = 0;
1494     if (adev->pm.smu_prv_buffer) {
1495         amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1496         *size = adev->pm.smu_prv_buffer_size;
1497     }
1498 
1499     return 0;
1500 }
1501 
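     /* On the non-DC path, refresh the cached display configuration, then dispatch
      * a display-config-change task to re-evaluate the current power state.
      */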
1502 static void pp_pm_compute_clocks(void *handle)
1503 {
1504     struct pp_hwmgr *hwmgr = handle;
1505     struct amdgpu_device *adev = hwmgr->adev;
1506 
1507     if (!amdgpu_device_has_dc_support(adev)) {
1508         amdgpu_dpm_get_active_displays(adev);
1509         adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1510         adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1511         adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1512         /* we have issues with mclk switching with
1513          * refresh rates over 120 hz on the non-DC code.
1514          */
1515         if (adev->pm.pm_display_cfg.vrefresh > 120)
1516             adev->pm.pm_display_cfg.min_vblank_time = 0;
1517 
1518         pp_display_configuration_change(handle,
1519                         &adev->pm.pm_display_cfg);
1520     }
1521 
1522     pp_dpm_dispatch_tasks(handle,
1523                   AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1524                   NULL);
1525 }
1526 
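     /* amd_pm_funcs dispatch table exported to the amdgpu power-management layer. */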
1527 static const struct amd_pm_funcs pp_dpm_funcs = {
1528     .load_firmware = pp_dpm_load_fw,
1529     .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1530     .force_performance_level = pp_dpm_force_performance_level,
1531     .get_performance_level = pp_dpm_get_performance_level,
1532     .get_current_power_state = pp_dpm_get_current_power_state,
1533     .dispatch_tasks = pp_dpm_dispatch_tasks,
1534     .set_fan_control_mode = pp_dpm_set_fan_control_mode,
1535     .get_fan_control_mode = pp_dpm_get_fan_control_mode,
1536     .set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
1537     .get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
1538     .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1539     .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1540     .get_pp_num_states = pp_dpm_get_pp_num_states,
1541     .get_pp_table = pp_dpm_get_pp_table,
1542     .set_pp_table = pp_dpm_set_pp_table,
1543     .force_clock_level = pp_dpm_force_clock_level,
1544     .emit_clock_levels = pp_dpm_emit_clock_levels,
1545     .print_clock_levels = pp_dpm_print_clock_levels,
1546     .get_sclk_od = pp_dpm_get_sclk_od,
1547     .set_sclk_od = pp_dpm_set_sclk_od,
1548     .get_mclk_od = pp_dpm_get_mclk_od,
1549     .set_mclk_od = pp_dpm_set_mclk_od,
1550     .read_sensor = pp_dpm_read_sensor,
1551     .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1552     .switch_power_profile = pp_dpm_switch_power_profile,
1553     .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1554     .set_powergating_by_smu = pp_set_powergating_by_smu,
1555     .get_power_profile_mode = pp_get_power_profile_mode,
1556     .set_power_profile_mode = pp_set_power_profile_mode,
1557     .set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
1558     .odn_edit_dpm_table = pp_odn_edit_dpm_table,
1559     .set_mp1_state = pp_dpm_set_mp1_state,
1560     .set_power_limit = pp_set_power_limit,
1561     .get_power_limit = pp_get_power_limit,
1562 /* export to DC */
1563     .get_sclk = pp_dpm_get_sclk,
1564     .get_mclk = pp_dpm_get_mclk,
1565     .display_configuration_change = pp_display_configuration_change,
1566     .get_display_power_level = pp_get_display_power_level,
1567     .get_current_clocks = pp_get_current_clocks,
1568     .get_clock_by_type = pp_get_clock_by_type,
1569     .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1570     .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1571     .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1572     .display_clock_voltage_request = pp_display_clock_voltage_request,
1573     .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1574     .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1575     .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1576     .set_active_display_count = pp_set_active_display_count,
1577     .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1578     .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1579     .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1580     .get_asic_baco_capability = pp_get_asic_baco_capability,
1581     .get_asic_baco_state = pp_get_asic_baco_state,
1582     .set_asic_baco_state = pp_set_asic_baco_state,
1583     .get_ppfeature_status = pp_get_ppfeature_status,
1584     .set_ppfeature_status = pp_set_ppfeature_status,
1585     .asic_reset_mode_2 = pp_asic_reset_mode_2,
1586     .smu_i2c_bus_access = pp_smu_i2c_bus_access,
1587     .set_df_cstate = pp_set_df_cstate,
1588     .set_xgmi_pstate = pp_set_xgmi_pstate,
1589     .get_gpu_metrics = pp_get_gpu_metrics,
1590     .gfx_state_change_set = pp_gfx_state_change_set,
1591     .get_smu_prv_buf_details = pp_get_prv_buffer_details,
1592     .pm_compute_clocks = pp_pm_compute_clocks,
1593 };