#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "amd_pcie.h"
#include "legacy_dpm.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"

#define amdgpu_dpm_pre_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_post_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_display_configuration_changed(adev) \
		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))

#define amdgpu_dpm_print_power_state(adev, ps) \
		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))

#define amdgpu_dpm_vblank_too_short(adev) \
		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))

#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

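/* sizes (in bytes) of ATOM_PPLIB_EXTENDEDHEADER for each header revision */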
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

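	/* fan table */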
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

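	/* clock dependency tables, shedding tables */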
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

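	/* cac data */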
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

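	/* ext tables */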
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

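	/* add the i2c bus for thermal/fan chip */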
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

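	/* check if the vblank period is too short to adjust the mclk */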
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

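	/* some older asics expose a dedicated 3D performance state,
	 * so try that first when the user requested performance
	 */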
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
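	/* there is no dedicated balanced state, so fall back to performance */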
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
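	/* scan the power state table for a state matching the requested type */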
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
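		/* user states */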
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
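		/* internal states */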
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}

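	/* if no state of the requested type was found, retry with a related type */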
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

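	/* nothing to do if dpm is not enabled */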
	if (!adev->pm.dpm_enabled)
		return 0;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
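		/* honor the user-requested state unless a thermal or UVD override is active */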
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return -EINVAL;

	if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

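	/* update whether vce is active */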
	ps->vce_active = adev->pm.dpm.vce_active;
	if (pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return ret;

	if (pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return 0;

	if (pp_funcs->set_power_state)
		pp_funcs->set_power_state(adev->powerplay.pp_handle);

	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
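			/* force a low perf level for thermal protection */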
			pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
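			/* save the user's level */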
			adev->pm.dpm.forced_level = level;
		} else {
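			/* otherwise, restore the user selected level */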
			pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

	return 0;
}

void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_get_active_displays(adev);

	amdgpu_dpm_change_power_state_locked(adev);
}

void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
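	/* switch to the thermal state */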
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
				   AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp,
				   &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
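			/* switch back to the user state */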
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
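			/* switch back to the user state */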
			dpm_state = adev->pm.dpm.user_state;
	}

	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;

	adev->pm.dpm.state = dpm_state;

	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
}