/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk(adev->powerplay.pp_handle, low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
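
/*
 * Usage sketch (illustrative only, not called from this file): sampling
 * the minimum and current engine/memory clocks.  "adev" is assumed to be
 * a fully initialized amdgpu device with DPM set up.
 *
 *	int sclk_min = amdgpu_dpm_get_sclk(adev, true);
 *	int sclk_cur = amdgpu_dpm_get_sclk(adev, false);
 *	int mclk_cur = amdgpu_dpm_get_mclk(adev, false);
 *
 * Note both helpers return 0 when the backend callback is missing, so a
 * zero reading is indistinguishable from "unsupported".
 */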

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate);
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
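
/*
 * Usage sketch (illustrative only): gating, then ungating, the VCN block
 * via the SMU.  "true" powers the block down, "false" powers it up; the
 * pwr_state cache above turns repeated requests for the current state
 * into cheap no-ops.
 *
 *	int r;
 *
 *	r = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, true);
 *	if (!r)
 *		r = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, false);
 */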

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
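
/*
 * Usage sketch (illustrative only): BACO entry and exit are meant to be
 * used as a pair, with the capability checked first.  This is a sketch
 * of the pattern, not a recommended reset sequence; see
 * amdgpu_dpm_baco_reset() below for the in-driver round trip.
 *
 *	if (amdgpu_dpm_is_baco_supported(adev)) {
 *		if (!amdgpu_dpm_baco_enter(adev))
 *			amdgpu_dpm_baco_exit(adev);
 *	}
 */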

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;

	/* Don't use BACO for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
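
/*
 * Usage sketch (illustrative only): a guarded mode1 reset.  The support
 * query and the reset itself are separate calls, so a caller typically
 * checks first:
 *
 *	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
 *		int r = amdgpu_dpm_mode1_reset(adev);
 *
 *		if (r)
 *			dev_err(adev->dev, "mode1 reset failed (%d)\n", r);
 *	}
 */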

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
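
/*
 * Usage sketch (illustrative only): reading the GFX engine clock through
 * the generic sensor interface.  The sensor id is assumed here to be
 * AMDGPU_PP_SENSOR_GFX_SCLK from kgd_pp_interface.h; "size" is both an
 * input (buffer size) and an output (bytes written).
 *
 *	uint32_t sclk;
 *	uint32_t size = sizeof(sclk);
 *	int r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
 *				       (void *)&sclk, &size);
 *
 *	if (!r)
 *		dev_info(adev->dev, "sclk: %u\n", sclk);
 */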

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}
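
/*
 * Usage sketch (illustrative only): loading the SMU firmware during early
 * init and logging the version it reports.  Passing NULL for smu_version
 * is valid when the caller does not need the version back.
 *
 *	uint32_t smu_version;
 *	int r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 *
 *	if (!r)
 *		dev_info(adev->dev, "SMU fw version: 0x%08x\n", smu_version);
 */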

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
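
/*
 * Usage sketch (illustrative only): clamping the SCLK soft limits to the
 * range the DPM tables support.  Both helpers accept PP_SCLK only; the
 * values are assumed to be in MHz, as exposed by the SMU interface.
 *
 *	uint32_t min_mhz, max_mhz;
 *
 *	if (!amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_mhz, &max_mhz))
 *		amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, min_mhz, max_mhz);
 */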

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 - sGpuChangeState_D0Entry and
 *                          2 - sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		adev->powerplay.pp_funcs->gfx_state_change_set(
			adev->powerplay.pp_handle, state);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
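
/*
 * Usage sketch (illustrative only): pinning the GPU to its highest DPM
 * levels around a workload and restoring automatic selection afterwards.
 * run_workload() is a hypothetical stand-in for the caller's work; the
 * call returns -EINVAL if a thermal event is active or the backend
 * rejects the level.
 *
 *	if (!amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_HIGH)) {
 *		run_workload();
 *		amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_AUTO);
 *	}
 */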

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
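
/*
 * Usage sketch (illustrative only): fetching the backend-owned metrics
 * table.  On success the return value is assumed to be the table size in
 * bytes, and "table" points at memory owned by the powerplay/SMU backend,
 * so the caller must not free it.
 *
 *	void *metrics;
 *	int size = amdgpu_dpm_get_gpu_metrics(adev, &metrics);
 *
 *	if (size > 0)
 *		dev_info(adev->dev, "gpu metrics table: %d bytes\n", size);
 */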

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
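
/*
 * Usage sketch (illustrative only): switching the fan to manual control
 * and setting a 50% duty cycle.  AMD_FAN_CTRL_MANUAL/AMD_FAN_CTRL_AUTO
 * are assumed from the shared amd_fan_ctrl_mode enum, and the PWM value
 * is assumed to follow the hwmon 0-255 convention.
 *
 *	if (!amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL))
 *		amdgpu_dpm_set_fan_speed_pwm(adev, 128);
 *
 * Setting AMD_FAN_CTRL_AUTO afterwards hands fan policy back to the SMU.
 */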

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
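
/*
 * Usage sketch (illustrative only): reading the current sustained power
 * limit and lowering it by 10%.  PP_PWR_LIMIT_CURRENT and
 * PP_PWR_TYPE_SUSTAINED are assumed from kgd_pp_interface.h, and the
 * units are assumed to be watts at this layer (the hwmon interface
 * converts from microwatts before calling in).
 *
 *	uint32_t limit;
 *
 *	if (!amdgpu_dpm_get_power_limit(adev, &limit,
 *					PP_PWR_LIMIT_CURRENT,
 *					PP_PWR_TYPE_SUSTAINED))
 *		amdgpu_dpm_set_power_limit(adev, limit - limit / 10);
 */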

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}