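/*
 * Shared dynamic power management (DPM) helpers for r600-family radeon
 * GPUs: power-state decoding and printing, clock/voltage/thermal
 * register programming, and ATOM PowerPlay table parsing.
 */
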
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

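/*
 * Default UTC/DTC values, one entry per TC level; callers hand these to
 * r600_set_tc(), which programs them into the CG_FFCT_n registers.
 */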
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
        R600_UTC_DFLT_00,
        R600_UTC_DFLT_01,
        R600_UTC_DFLT_02,
        R600_UTC_DFLT_03,
        R600_UTC_DFLT_04,
        R600_UTC_DFLT_05,
        R600_UTC_DFLT_06,
        R600_UTC_DFLT_07,
        R600_UTC_DFLT_08,
        R600_UTC_DFLT_09,
        R600_UTC_DFLT_10,
        R600_UTC_DFLT_11,
        R600_UTC_DFLT_12,
        R600_UTC_DFLT_13,
        R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
        R600_DTC_DFLT_00,
        R600_DTC_DFLT_01,
        R600_DTC_DFLT_02,
        R600_DTC_DFLT_03,
        R600_DTC_DFLT_04,
        R600_DTC_DFLT_05,
        R600_DTC_DFLT_06,
        R600_DTC_DFLT_07,
        R600_DTC_DFLT_08,
        R600_DTC_DFLT_09,
        R600_DTC_DFLT_10,
        R600_DTC_DFLT_11,
        R600_DTC_DFLT_12,
        R600_DTC_DFLT_13,
        R600_DTC_DFLT_14,
};

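/* Decode the ATOM PowerPlay classification flags of a power state. */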
void r600_dpm_print_class_info(u32 class, u32 class2)
{
        const char *s;

        switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
        case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
        default:
                s = "none";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
                s = "battery";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
                s = "balanced";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
                s = "performance";
                break;
        }
        printk("\tui class: %s\n", s);

        printk("\tinternal class:");
        if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
            (class2 == 0))
                pr_cont(" none");
        else {
                if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                        pr_cont(" boot");
                if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                        pr_cont(" thermal");
                if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
                        pr_cont(" limited_pwr");
                if (class & ATOM_PPLIB_CLASSIFICATION_REST)
                        pr_cont(" rest");
                if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
                        pr_cont(" forced");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                        pr_cont(" 3d_perf");
                if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
                        pr_cont(" ovrdrv");
                if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                        pr_cont(" uvd");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
                        pr_cont(" 3d_low");
                if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                        pr_cont(" acpi");
                if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                        pr_cont(" uvd_hd2");
                if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                        pr_cont(" uvd_hd");
                if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                        pr_cont(" uvd_sd");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
                        pr_cont(" limited_pwr2");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                        pr_cont(" ulv");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                        pr_cont(" uvd_mvc");
        }
        pr_cont("\n");
}

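/* Print the ATOM PowerPlay capability flags of a power state. */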
void r600_dpm_print_cap_info(u32 caps)
{
        printk("\tcaps:");
        if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
                pr_cont(" single_disp");
        if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
                pr_cont(" video");
        if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
                pr_cont(" no_dc");
        pr_cont("\n");
}

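/* Flag whether this state is the current (c), requested (r) or boot (b) state. */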
void r600_dpm_print_ps_status(struct radeon_device *rdev,
                              struct radeon_ps *rps)
{
        printk("\tstatus:");
        if (rps == rdev->pm.dpm.current_ps)
                pr_cont(" c");
        if (rps == rdev->pm.dpm.requested_ps)
                pr_cont(" r");
        if (rps == rdev->pm.dpm.boot_ps)
                pr_cont(" b");
        pr_cont("\n");
}

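/*
 * Return the vblank duration, in microseconds, of the first enabled
 * CRTC, or 0xffffffff when no CRTC is active.
 */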
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff;

        if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        radeon_crtc = to_radeon_crtc(crtc);
                        if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
                                vblank_in_pixels =
                                        radeon_crtc->hw_mode.crtc_htotal *
                                        (radeon_crtc->hw_mode.crtc_vblank_end -
                                         radeon_crtc->hw_mode.crtc_vdisplay +
                                         (radeon_crtc->v_border * 2));

                                vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
                                break;
                        }
                }
        }

        return vblank_time_us;
}

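/* Return the vertical refresh rate of the first enabled CRTC, or 0 if none. */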
u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        u32 vrefresh = 0;

        if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        radeon_crtc = to_radeon_crtc(crtc);
                        if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
                                vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
                                break;
                        }
                }
        }
        return vrefresh;
}

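/*
 * Factor the scaled interval i * r_c / 100 into a mantissa *p and a
 * power-of-four exponent *u, such that *p = (i * r_c / 100) >> (2 * *u)
 * needs roughly p_b bits.
 */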
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
                            u32 *p, u32 *u)
{
        u32 b_c = 0;
        u32 i_c;
        u32 tmp;

        i_c = (i * r_c) / 100;
        tmp = i_c >> p_b;

        while (tmp) {
                b_c++;
                tmp >>= 1;
        }

        *u = (b_c + 1) / 2;
        *p = i_c / (1 << (2 * (*u)));
}

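/*
 * Derive the low/high activity thresholds *tl/*th around the target t
 * with hysteresis h, weighted by the ratio of the high and low sclk
 * levels fh/fl.  Fails if either frequency is zero or fl > fh.
 */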
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
        u32 k, a, ah, al;
        u32 t1;

        if ((fl == 0) || (fh == 0) || (fl > fh))
                return -EINVAL;

        k = (100 * fh) / fl;
        t1 = (t * (k - 100));
        a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
        a = (a + 5) / 10;
        ah = ((a * t) + 5000) / 10000;
        al = a - ah;

        *th = t - ah;
        *tl = t + al;

        return 0;
}

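/*
 * Toggle dynamic gfx clock gating.  On disable, request the clocks
 * back from the RLC and wait for its acknowledgement before forcing
 * the GRBM power state on.
 */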
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
        int i;

        if (enable) {
                WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
        } else {
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

                WREG32(CG_RLC_REQ_AND_RSP, 0x2);

                for (i = 0; i < rdev->usec_timeout; i++) {
                        if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
                                break;
                        udelay(1);
                }

                WREG32(CG_RLC_REQ_AND_RSP, 0x0);

                WREG32(GRBM_PWR_CNTL, 0x1);
                RREG32(GRBM_PWR_CNTL);
        }
}

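/* Master enable for the dynamic power management state machine. */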
void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
        else
                WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
        else
                WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
        WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
        else
                WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
        if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
                return true;
        else
                return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
        else
                WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
        else
                WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
        if (enable)
                WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
        else
                WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
                        break;
                udelay(1);
        }
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
        WREG32(CG_BSP, BSP(p) | BSU(u));
}

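/*
 * Program the activity thresholds governing transitions between the
 * low, medium and high performance levels.
 */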
void r600_set_at(struct radeon_device *rdev,
                 u32 l_to_m, u32 m_to_h,
                 u32 h_to_m, u32 m_to_l)
{
        WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
        WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
                 u32 index, u32 u_t, u32 d_t)
{
        WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
                    enum r600_td td)
{
        if (td == R600_TD_AUTO)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
        else
                WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
        if (td == R600_TD_UP)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
        if (td == R600_TD_DOWN)
                WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
        WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
        WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
        WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
        WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
        WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
        WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
        WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
        WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
        WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
        WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
        WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

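/*
 * Each sclk frequency-setting step entry is two dwords wide (PART1 and
 * PART2), hence the index * 4 * 2 register stride used below.
 */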
void r600_engine_clock_entry_enable(struct radeon_device *rdev,
                                    u32 index, bool enable)
{
        if (enable)
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
        else
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
                                                   u32 index, bool enable)
{
        if (enable)
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
        else
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
                                                 u32 index, bool enable)
{
        if (enable)
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
        else
                WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
                         0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
                                              u32 index, u32 divider)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
                                                   u32 index, u32 divider)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
                                                  u32 index, u32 divider)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
                                           u32 index, u32 step_time)
{
        WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
                 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
        WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
        WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
        WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
                                      u64 mask)
{
        WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
        WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

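/*
 * The CTXSW VID entries are addressed in reverse order (ix = 3 - index);
 * the low 32 bits of the pin mask go to the per-level lower GPIO
 * register, the bits above that to a 3-bit field in VID_UPPER_GPIO_CNTL.
 */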
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
                                           enum r600_power_level index, u64 pins)
{
        u32 tmp, mask;
        u32 ix = 3 - (3 & index);

        WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

        mask = 7 << (3 * ix);
        tmp = RREG32(VID_UPPER_GPIO_CNTL);
        tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
        WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
                                                    u64 mask)
{
        u32 gpio;

        gpio = RREG32(GPIOPAD_MASK);
        gpio &= ~mask;
        WREG32(GPIOPAD_MASK, gpio);

        gpio = RREG32(GPIOPAD_EN);
        gpio &= ~mask;
        WREG32(GPIOPAD_EN, gpio);

        gpio = RREG32(GPIOPAD_A);
        gpio &= ~mask;
        WREG32(GPIOPAD_A, gpio);
}

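/*
 * The per-level CTXSW_PROFILE_INDEX registers are likewise addressed in
 * reverse order (ix = 3 - (index & 3)).
 */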
void r600_power_level_enable(struct radeon_device *rdev,
                             enum r600_power_level index, bool enable)
{
        u32 ix = 3 - (3 & index);

        if (enable)
                WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
                         ~CTXSW_FREQ_STATE_ENABLE);
        else
                WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
                         ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
                                        enum r600_power_level index, u32 voltage_index)
{
        u32 ix = 3 - (3 & index);

        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
                 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
                                          enum r600_power_level index, u32 mem_clock_index)
{
        u32 ix = 3 - (3 & index);

        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
                 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
                                          enum r600_power_level index, u32 eng_clock_index)
{
        u32 ix = 3 - (3 & index);

        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
                 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
                                       enum r600_power_level index,
                                       enum r600_display_watermark watermark_id)
{
        u32 ix = 3 - (3 & index);
        u32 tmp = 0;

        if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
                tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
                                    enum r600_power_level index, bool compatible)
{
        u32 ix = 3 - (3 & index);
        u32 tmp = 0;

        if (compatible)
                tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
        WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
        u32 tmp;

        tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
        tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
        return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
        u32 tmp;

        tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
        tmp >>= TARGET_PROFILE_INDEX_SHIFT;
        return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
                                      enum r600_power_level index)
{
        WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
                 ~DYN_PWR_ENTER_INDEX_MASK);
}

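/*
 * Busy-wait, bounded by usec_timeout, until the target and then the
 * current power level have moved away from @index.
 */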
void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
                                       enum r600_power_level index)
{
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_target_index(rdev) != index)
                        break;
                udelay(1);
        }

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_current_index(rdev) != index)
                        break;
                udelay(1);
        }
}

void r600_wait_for_power_level(struct radeon_device *rdev,
                               enum r600_power_level index)
{
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_target_index(rdev) == index)
                        break;
                udelay(1);
        }

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (r600_power_level_get_current_index(rdev) == index)
                        break;
                udelay(1);
        }
}

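/*
 * Bring up the DPM state machine: freeze sclk/mclk control, enable
 * dynamic PM, wait out a vblank on both primary CRTCs, then cycle the
 * SPLL through bypass twice (presumably to let it settle) before
 * handing clock control back to the hardware.
 */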
void r600_start_dpm(struct radeon_device *rdev)
{
        r600_enable_sclk_control(rdev, false);
        r600_enable_mclk_control(rdev, false);

        r600_dynamicpm_enable(rdev, true);

        radeon_wait_for_vblank(rdev, 0);
        radeon_wait_for_vblank(rdev, 1);

        r600_enable_spll_bypass(rdev, true);
        r600_wait_for_spll_change(rdev);
        r600_enable_spll_bypass(rdev, false);
        r600_wait_for_spll_change(rdev);

        r600_enable_spll_bypass(rdev, true);
        r600_wait_for_spll_change(rdev);
        r600_enable_spll_bypass(rdev, false);
        r600_wait_for_spll_change(rdev);

        r600_enable_sclk_control(rdev, true);
        r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
        r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
        return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
        if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                return true;
        if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                return true;
        if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                return true;
        if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                return true;
        if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                return true;
        return false;
}

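/*
 * Clamp the requested range to 0..255 C and program the thermal
 * interrupt thresholds; min_temp/max_temp are in millidegrees C while
 * the register fields take whole degrees.
 */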
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
                                              int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
        WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
        WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

        rdev->pm.dpm.thermal.min_temp = low_temp;
        rdev->pm.dpm.thermal.max_temp = high_temp;

        return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
        switch (sensor) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SI:
        case THERMAL_TYPE_CI:
        case THERMAL_TYPE_KV:
                return true;
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
                return false;
        case THERMAL_TYPE_NONE:
        case THERMAL_TYPE_EXTERNAL:
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                return false;
        }
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
        int ret;

        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
                if (ret)
                        return ret;
                rdev->irq.dpm_thermal = true;
                radeon_irq_set(rdev);
        }

        return 0;
}

union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
        struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
        struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
        struct _ATOM_PPLIB_FANTABLE3 fan3;
};

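/*
 * Convert an ATOM clock/voltage dependency table into the driver's
 * representation.  Clocks are stored as a 16-bit little-endian low word
 * plus an 8-bit high byte, so the halves are reassembled here.
 */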
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
                                            ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
        int i;
        ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

        radeon_table->entries = kcalloc(atom_table->ucNumEntries,
                                        sizeof(struct radeon_clock_voltage_dependency_entry),
                                        GFP_KERNEL);
        if (!radeon_table->entries)
                return -ENOMEM;

        entry = &atom_table->entries[0];
        for (i = 0; i < atom_table->ucNumEntries; i++) {
                radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                        (entry->ucClockHigh << 16);
                radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
                entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                        ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
        }
        radeon_table->count = atom_table->ucNumEntries;

        return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;

        if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
                                    &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
        rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
        rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

        return 0;
}

#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

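/*
 * Parse the optional extended PowerPlay tables: fan table (v3+),
 * clock/voltage dependency and limit tables (v4+), TDP/CAC data (v5+),
 * and the extended-header tables (VCE, UVD, SAMU, PPM, ACP, PowerTune).
 * Any tables already allocated are freed again on error.
 */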
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        union power_info *power_info;
        union fan_info *fan_info;
        ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        int ret, i;

        if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
                                    &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

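        /* fan table */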
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                if (power_info->pplib3.usFanTableOffset) {
                        fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
                                                      le16_to_cpu(power_info->pplib3.usFanTableOffset));
                        rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
                        rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
                        rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
                        rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
                        rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
                        rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
                        rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
                        if (fan_info->fan.ucFanTableFormat >= 2)
                                rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
                        else
                                rdev->pm.dpm.fan.t_max = 10900;
                        rdev->pm.dpm.fan.cycle_delay = 100000;
                        if (fan_info->fan.ucFanTableFormat >= 3) {
                                rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
                                rdev->pm.dpm.fan.default_max_fan_pwm =
                                        le16_to_cpu(fan_info->fan3.usFanPWMMax);
                                rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
                                rdev->pm.dpm.fan.fan_output_sensitivity =
                                        le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
                        }
                        rdev->pm.dpm.fan.ucode_fan_control = true;
                }
        }

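        /* clock dependency tables, shedding tables */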
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
                if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                               dep_table);
                        if (ret)
                                return ret;
                }
                if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                               dep_table);
                        if (ret) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                               dep_table);
                        if (ret) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
                        ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                               dep_table);
                        if (ret) {
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
                                kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
                        ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
                                (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
                        if (clk_v->ucNumEntries) {
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
                                        le16_to_cpu(clk_v->entries[0].usSclkLow) |
                                        (clk_v->entries[0].ucSclkHigh << 16);
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
                                        le16_to_cpu(clk_v->entries[0].usMclkLow) |
                                        (clk_v->entries[0].ucMclkHigh << 16);
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
                                        le16_to_cpu(clk_v->entries[0].usVddc);
                                rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
                                        le16_to_cpu(clk_v->entries[0].usVddci);
                        }
                }
                if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
                        ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
                                (ATOM_PPLIB_PhaseSheddingLimits_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

                        rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
                                kcalloc(psl->ucNumEntries,
                                        sizeof(struct radeon_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }

                        entry = &psl->entries[0];
                        for (i = 0; i < psl->ucNumEntries; i++) {
                                rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
                                        le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
                                rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
                                        le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
                                rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
                        }
                        rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
                                psl->ucNumEntries;
                }
        }

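        /* cac data */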
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
                rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
                rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
                rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
                rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
                if (rdev->pm.dpm.tdp_od_limit)
                        rdev->pm.dpm.power_control = true;
                else
                        rdev->pm.dpm.power_control = false;
                rdev->pm.dpm.tdp_adjustment = 0;
                rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
                rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
                rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
                if (power_info->pplib5.usCACLeakageTableOffset) {
                        ATOM_PPLIB_CAC_Leakage_Table *cac_table =
                                (ATOM_PPLIB_CAC_Leakage_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
                        ATOM_PPLIB_CAC_Leakage_Record *entry;
                        u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        entry = &cac_table->entries[0];
                        for (i = 0; i < cac_table->ucNumEntries; i++) {
                                if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
                                                le16_to_cpu(entry->usVddc1);
                                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
                                                le16_to_cpu(entry->usVddc2);
                                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
                                                le16_to_cpu(entry->usVddc3);
                                } else {
                                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
                                                le16_to_cpu(entry->usVddc);
                                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
                                                le32_to_cpu(entry->ulLeakageValue);
                                }
                                entry = (ATOM_PPLIB_CAC_Leakage_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
                        }
                        rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
                }
        }

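        /* extended tables */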
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
                    ext_hdr->usVCETableOffset) {
                        VCEClockInfoArray *array = (VCEClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + array->ucNumEntries * sizeof(VCEClockInfo));
                        ATOM_PPLIB_VCE_State_Table *states =
                                (ATOM_PPLIB_VCE_State_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
                                 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
                        ATOM_PPLIB_VCE_State_Record *state_entry;
                        VCEClockInfo *vce_clk;
                        u32 size = limits->numEntries *
                                sizeof(struct radeon_vce_clock_voltage_dependency_entry);
                        rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        state_entry = &states->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
                        }
                        for (i = 0; i < states->numEntries; i++) {
                                if (i >= RADEON_MAX_VCE_LEVELS)
                                        break;
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                rdev->pm.dpm.vce_states[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                rdev->pm.dpm.vce_states[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                rdev->pm.dpm.vce_states[i].clk_idx =
                                        state_entry->ucClockInfoIndex & 0x3f;
                                rdev->pm.dpm.vce_states[i].pstate =
                                        (state_entry->ucClockInfoIndex & 0xc0) >> 6;
                                state_entry = (ATOM_PPLIB_VCE_State_Record *)
                                        ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
                    ext_hdr->usUVDTableOffset) {
                        UVDClockInfoArray *array = (UVDClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
                        rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                UVDClockInfo *uvd_clk = (UVDClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
                                        le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
                    ext_hdr->usSAMUTableOffset) {
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct radeon_clock_voltage_dependency_entry);
                        rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
                                rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
                    ext_hdr->usPPMTableOffset) {
                        ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usPPMTableOffset));
                        rdev->pm.dpm.dyn_state.ppm_table =
                                kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.ppm_table) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
                        rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
                                le16_to_cpu(ppm->usCpuCoreNumber);
                        rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
                                le32_to_cpu(ppm->ulPlatformTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
                                le32_to_cpu(ppm->ulSmallACPlatformTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
                                le32_to_cpu(ppm->ulPlatformTDC);
                        rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
                                le32_to_cpu(ppm->ulSmallACPlatformTDC);
                        rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
                                le32_to_cpu(ppm->ulApuTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
                                le32_to_cpu(ppm->ulDGpuTDP);
                        rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
                                le32_to_cpu(ppm->ulDGpuUlvPower);
                        rdev->pm.dpm.dyn_state.ppm_table->tj_max =
                                le32_to_cpu(ppm->ulTjmax);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
                    ext_hdr->usACPTableOffset) {
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct radeon_clock_voltage_dependency_entry);
                        rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
                                rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
                    ext_hdr->usPowerTuneTableOffset) {
                        u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                        ATOM_PowerTune_Table *pt;
                        rdev->pm.dpm.dyn_state.cac_tdp_table =
                                kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
                                r600_free_extended_power_table(rdev);
                                return -ENOMEM;
                        }
                        if (rev > 0) {
                                ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
                                        le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
                                pt = &ppt->power_tune_table;
                        } else {
                                ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
                                pt = &ppt->power_tune_table;
                        }
                        rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
                        rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
                                le16_to_cpu(pt->usConfigurableTDP);
                        rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
                        rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
                                le16_to_cpu(pt->usBatteryPowerLimit);
                        rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
                                le16_to_cpu(pt->usSmallPowerLimit);
                        rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
                                le16_to_cpu(pt->usLowCACLeakage);
                        rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
                                le16_to_cpu(pt->usHighCACLeakage);
                }
        }

        return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
        struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

        kfree(dyn_state->vddc_dependency_on_sclk.entries);
        kfree(dyn_state->vddci_dependency_on_mclk.entries);
        kfree(dyn_state->vddc_dependency_on_mclk.entries);
        kfree(dyn_state->mvdd_dependency_on_mclk.entries);
        kfree(dyn_state->cac_leakage_table.entries);
        kfree(dyn_state->phase_shedding_limits_table.entries);
        kfree(dyn_state->ppm_table);
        kfree(dyn_state->cac_tdp_table);
        kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
        kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
        kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
        kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

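/*
 * Pick the PCIE gen to use: an explicitly forced asic_gen wins;
 * otherwise choose the fastest speed supported by both the system mask
 * and the requested default.
 */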
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
                                               u32 sys_mask,
                                               enum radeon_pcie_gen asic_gen,
                                               enum radeon_pcie_gen default_gen)
{
        switch (asic_gen) {
        case RADEON_PCIE_GEN1:
                return RADEON_PCIE_GEN1;
        case RADEON_PCIE_GEN2:
                return RADEON_PCIE_GEN2;
        case RADEON_PCIE_GEN3:
                return RADEON_PCIE_GEN3;
        default:
                if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
                        return RADEON_PCIE_GEN3;
                else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
                        return RADEON_PCIE_GEN2;
                else
                        return RADEON_PCIE_GEN1;
        }
        return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
                               u16 asic_lanes,
                               u16 default_lanes)
{
        switch (asic_lanes) {
        case 0:
        default:
                return default_lanes;
        case 1:
                return 1;
        case 2:
                return 2;
        case 4:
                return 4;
        case 8:
                return 8;
        case 12:
                return 12;
        case 16:
                return 16;
        }
}

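/*
 * Encode a PCIE lane count (1/2/4/8/12/16) into its 3-bit register
 * representation; anything else encodes to 0.
 */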
u8 r600_encode_pci_lane_width(u32 lanes)
{
        static const u8 encoded_lanes[] = {
                0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
        };

        if (lanes > 16)
                return 0;

        return encoded_lanes[lanes];
}