/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"

bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

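		/* pplib expects clocks in 10 kHz units; DC supplies kHz. */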
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

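		/* One entry per active display; controller IDs are 1-based. */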
		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);

		amdgpu_dpm_compute_clocks(adev);
	}

	return true;
}

static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
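	/* Hard-coded fallback levels, used when pplib cannot report real ones. */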
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
		enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
				pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}

bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (amdgpu_dpm_get_clock_by_type(adev,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
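		/* Query failed; report the hard-coded defaults instead. */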
		get_default_clock_levels(clk_type, dc_clks);
		return true;
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) {
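		/* No validation clocks from pplib; use safe defaults (10 kHz units). */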
		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
		validation_clks.engine_max_clock = 72000;
		validation_clks.memory_max_clock = 80000;
		validation_clks.level = 0;
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB: level: %d\n",
			validation_clks.level);

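	/* Translate from 10 kHz units to kHz. */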
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

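	/* Trim away levels that exceed the validation clocks. */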
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
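				/*
				 * This level is above the validation clock, so
				 * the previous one is the highest usable level.
				 */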
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_latency(adev,
						dc_to_pp_clock_type(clk_type),
						&pp_clks);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev,
						dc_to_pp_clock_type(clk_type),
						&pp_clk_info);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

bool dm_pp_notify_wm_clock_changes(
		const struct dc_context *ctx,
		struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	struct amdgpu_device *adev = ctx->driver_context;

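	/*
	 * This watermark path only applies to Polaris-family ASICs
	 * (POLARIS10 through VEGAM); everything else reports failure.
	 */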
	if ((adev->asic_type >= CHIP_POLARIS10) &&
	    (adev->asic_type <= CHIP_VEGAM) &&
	    !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
						(void *)wm_with_clock_ranges))
		return true;

	return false;
}

bool dm_pp_apply_power_level_change_request(
		const struct dc_context *ctx,
		struct dm_pp_power_level_change_request *level_change_req)
{
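	/* TODO: to be implemented */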
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
		const struct dc_context *ctx,
		struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request);
	if (ret && (ret != -EOPNOTSUPP))
		return false;

	return true;
}

bool dm_pp_get_static_clocks(
		const struct dc_context *ctx,
		struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};

	if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info))
		return false;

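	/* pplib reports these clocks in 10 kHz units; convert to kHz. */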
	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}

static void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
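		/* Watermark instances above 3 are invalid; fall back to set A. */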
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
						    &wm_with_clock_ranges);
}

static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_notify_smu_enable_pwe(adev);
}

static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_active_display_count(adev, count);
}

static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock);
}

static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock);
}

static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz);
}

static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_set_active_display_count(adev, count);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

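	/* -EOPNOTSUPP means the underlying SMU does not implement this request. */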
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_pstate_handshake_support(
		struct pp_smu *pp, bool pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	if (amdgpu_dpm_display_disable_memory_clock_switch(adev,
							   !pstate_handshake_supported))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev,
							  max_clocks);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_uclk_dpm_states(adev,
					     clock_values_in_khz,
					     num_states);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

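		/* The PME workaround hook is deliberately left unset here. */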
		funcs->nv_funcs.set_pme_wa_enable = NULL;

		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;

		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;

		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;

	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("smu version is not supported!\n");
		break;
	}
}