Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2013 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <linux/firmware.h>
0025 #include <linux/pci.h>
0026 #include <linux/seq_file.h>
0027 
0028 #include "atom.h"
0029 #include "ci_dpm.h"
0030 #include "cik.h"
0031 #include "cikd.h"
0032 #include "r600_dpm.h"
0033 #include "radeon.h"
0034 #include "radeon_asic.h"
0035 #include "radeon_ucode.h"
0036 #include "si_dpm.h"
0037 
/* Memory-controller arbiter frequency set indices (F0..F3) -- NOTE(review):
 * passed as arb_freq_src/arb_freq_dest to ni_copy_and_switch_arb_sets();
 * confirm meaning against cikd.h. */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* Presumably one past the last valid SMC SRAM address (bounds for SMC
 * read/copy helpers) -- not referenced in this chunk; verify against callers. */
#define SMC_RAM_END 0x40000

/* Voltage conversion factors; VOLTAGE_SCALE is used by ci_convert_to_vid(). */
#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100
0048 
/* PowerTune defaults for Hawaii XT parts (device IDs 0x67B0/0x67B8; see
 * ci_initialize_powertune_defaults()).  The two arrays are the flat
 * bapmti_r / bapmti_rc thermal tables copied into BAPMTI_R / BAPMTI_RC by
 * ci_populate_bapm_parameters_in_dpm_table(); scalar field order follows
 * struct ci_pt_defaults in ci_dpm.h. */
static const struct ci_pt_defaults defaults_hawaii_xt =
{
    1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
    { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
    { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
0055 
/* PowerTune defaults for Hawaii PRO parts (device IDs 0x67B1/0x67BA; see
 * ci_initialize_powertune_defaults()).  Differs from the XT table only in
 * the bapm_temp_gradient scalar (0x65062 vs 0xB0000). */
static const struct ci_pt_defaults defaults_hawaii_pro =
{
    1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
    { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
    { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
0062 
/* PowerTune defaults for Bonaire XT parts; also the fallback table for any
 * device ID not matched explicitly in ci_initialize_powertune_defaults(). */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
    1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
    { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
    { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
0069 
/* PowerTune defaults for Saturn XT parts (device IDs 0x6640/0x6641/0x6646/
 * 0x6647; see ci_initialize_powertune_defaults()). */
static const struct ci_pt_defaults defaults_saturn_xt =
{
    1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
    { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
    { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
0076 
/* DIDT register programming sequence consumed by
 * ci_program_pt_config_registers() when enabling DIDT.  Each entry appears
 * to be { offset, mask, shift, value, type } (confirm field order against
 * struct ci_pt_config_reg in ci_dpm.h); every entry here targets the DIDT
 * indirect register space (CISLANDS_CONFIGREG_DIDT_IND), and the list is
 * terminated by an offset of 0xFFFFFFFF, which is what the programming
 * loop tests for. */
static const struct ci_pt_config_reg didt_config_ci[] =
{
    { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
    { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
    { 0xFFFFFFFF }
};
0153 
0154 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
0155 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
0156                        u32 arb_freq_src, u32 arb_freq_dest);
0157 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
0158                      struct atom_voltage_table_entry *voltage_table,
0159                      u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
0160 static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
0161 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
0162                        u32 target_tdp);
0163 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
0164 
0165 static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
0166 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
0167                               PPSMC_Msg msg, u32 parameter);
0168 
0169 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
0170 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
0171 
0172 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
0173 {
0174     struct ci_power_info *pi = rdev->pm.dpm.priv;
0175 
0176     return pi;
0177 }
0178 
0179 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
0180 {
0181     struct ci_ps *ps = rps->ps_priv;
0182 
0183     return ps;
0184 }
0185 
0186 static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
0187 {
0188     struct ci_power_info *pi = ci_get_pi(rdev);
0189 
0190     switch (rdev->pdev->device) {
0191     case 0x6649:
0192     case 0x6650:
0193     case 0x6651:
0194     case 0x6658:
0195     case 0x665C:
0196     case 0x665D:
0197     default:
0198         pi->powertune_defaults = &defaults_bonaire_xt;
0199         break;
0200     case 0x6640:
0201     case 0x6641:
0202     case 0x6646:
0203     case 0x6647:
0204         pi->powertune_defaults = &defaults_saturn_xt;
0205         break;
0206     case 0x67B8:
0207     case 0x67B0:
0208         pi->powertune_defaults = &defaults_hawaii_xt;
0209         break;
0210     case 0x67BA:
0211     case 0x67B1:
0212         pi->powertune_defaults = &defaults_hawaii_pro;
0213         break;
0214     case 0x67A0:
0215     case 0x67A1:
0216     case 0x67A2:
0217     case 0x67A8:
0218     case 0x67A9:
0219     case 0x67AA:
0220     case 0x67B9:
0221     case 0x67BE:
0222         pi->powertune_defaults = &defaults_bonaire_xt;
0223         break;
0224     }
0225 
0226     pi->dte_tj_offset = 0;
0227 
0228     pi->caps_power_containment = true;
0229     pi->caps_cac = false;
0230     pi->caps_sq_ramping = false;
0231     pi->caps_db_ramping = false;
0232     pi->caps_td_ramping = false;
0233     pi->caps_tcp_ramping = false;
0234 
0235     if (pi->caps_power_containment) {
0236         pi->caps_cac = true;
0237         if (rdev->family == CHIP_HAWAII)
0238             pi->enable_bapm_feature = false;
0239         else
0240             pi->enable_bapm_feature = true;
0241         pi->enable_tdc_limit_feature = true;
0242         pi->enable_pkg_pwr_tracking_feature = true;
0243     }
0244 }
0245 
0246 static u8 ci_convert_to_vid(u16 vddc)
0247 {
0248     return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
0249 }
0250 
0251 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
0252 {
0253     struct ci_power_info *pi = ci_get_pi(rdev);
0254     u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
0255     u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
0256     u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
0257     u32 i;
0258 
0259     if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
0260         return -EINVAL;
0261     if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
0262         return -EINVAL;
0263     if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
0264         rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
0265         return -EINVAL;
0266 
0267     for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
0268         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
0269             lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
0270             hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
0271             hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
0272         } else {
0273             lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
0274             hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
0275         }
0276     }
0277     return 0;
0278 }
0279 
0280 static int ci_populate_vddc_vid(struct radeon_device *rdev)
0281 {
0282     struct ci_power_info *pi = ci_get_pi(rdev);
0283     u8 *vid = pi->smc_powertune_table.VddCVid;
0284     u32 i;
0285 
0286     if (pi->vddc_voltage_table.count > 8)
0287         return -EINVAL;
0288 
0289     for (i = 0; i < pi->vddc_voltage_table.count; i++)
0290         vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
0291 
0292     return 0;
0293 }
0294 
0295 static int ci_populate_svi_load_line(struct radeon_device *rdev)
0296 {
0297     struct ci_power_info *pi = ci_get_pi(rdev);
0298     const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
0299 
0300     pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
0301     pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
0302     pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
0303     pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
0304 
0305     return 0;
0306 }
0307 
0308 static int ci_populate_tdc_limit(struct radeon_device *rdev)
0309 {
0310     struct ci_power_info *pi = ci_get_pi(rdev);
0311     const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
0312     u16 tdc_limit;
0313 
0314     tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
0315     pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
0316     pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
0317         pt_defaults->tdc_vddc_throttle_release_limit_perc;
0318     pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
0319 
0320     return 0;
0321 }
0322 
0323 static int ci_populate_dw8(struct radeon_device *rdev)
0324 {
0325     struct ci_power_info *pi = ci_get_pi(rdev);
0326     const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
0327     int ret;
0328 
0329     ret = ci_read_smc_sram_dword(rdev,
0330                      SMU7_FIRMWARE_HEADER_LOCATION +
0331                      offsetof(SMU7_Firmware_Header, PmFuseTable) +
0332                      offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
0333                      (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
0334                      pi->sram_end);
0335     if (ret)
0336         return -EINVAL;
0337     else
0338         pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
0339 
0340     return 0;
0341 }
0342 
0343 static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
0344 {
0345     struct ci_power_info *pi = ci_get_pi(rdev);
0346 
0347     if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
0348         (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
0349         rdev->pm.dpm.fan.fan_output_sensitivity =
0350             rdev->pm.dpm.fan.default_fan_output_sensitivity;
0351 
0352     pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
0353         cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
0354 
0355     return 0;
0356 }
0357 
0358 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
0359 {
0360     struct ci_power_info *pi = ci_get_pi(rdev);
0361     u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
0362     u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
0363     int i, min, max;
0364 
0365     min = max = hi_vid[0];
0366     for (i = 0; i < 8; i++) {
0367         if (0 != hi_vid[i]) {
0368             if (min > hi_vid[i])
0369                 min = hi_vid[i];
0370             if (max < hi_vid[i])
0371                 max = hi_vid[i];
0372         }
0373 
0374         if (0 != lo_vid[i]) {
0375             if (min > lo_vid[i])
0376                 min = lo_vid[i];
0377             if (max < lo_vid[i])
0378                 max = lo_vid[i];
0379         }
0380     }
0381 
0382     if ((min == 0) || (max == 0))
0383         return -EINVAL;
0384     pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
0385     pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
0386 
0387     return 0;
0388 }
0389 
0390 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
0391 {
0392     struct ci_power_info *pi = ci_get_pi(rdev);
0393     u16 hi_sidd, lo_sidd;
0394     struct radeon_cac_tdp_table *cac_tdp_table =
0395         rdev->pm.dpm.dyn_state.cac_tdp_table;
0396 
0397     hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
0398     lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
0399 
0400     pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
0401     pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
0402 
0403     return 0;
0404 }
0405 
0406 static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
0407 {
0408     struct ci_power_info *pi = ci_get_pi(rdev);
0409     const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
0410     SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
0411     struct radeon_cac_tdp_table *cac_tdp_table =
0412         rdev->pm.dpm.dyn_state.cac_tdp_table;
0413     struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
0414     int i, j, k;
0415     const u16 *def1;
0416     const u16 *def2;
0417 
0418     dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
0419     dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
0420 
0421     dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
0422     dpm_table->GpuTjMax =
0423         (u8)(pi->thermal_temp_setting.temperature_high / 1000);
0424     dpm_table->GpuTjHyst = 8;
0425 
0426     dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
0427 
0428     if (ppm) {
0429         dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
0430         dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
0431     } else {
0432         dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
0433         dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
0434     }
0435 
0436     dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
0437     def1 = pt_defaults->bapmti_r;
0438     def2 = pt_defaults->bapmti_rc;
0439 
0440     for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
0441         for (j = 0; j < SMU7_DTE_SOURCES; j++) {
0442             for (k = 0; k < SMU7_DTE_SINKS; k++) {
0443                 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
0444                 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
0445                 def1++;
0446                 def2++;
0447             }
0448         }
0449     }
0450 
0451     return 0;
0452 }
0453 
0454 static int ci_populate_pm_base(struct radeon_device *rdev)
0455 {
0456     struct ci_power_info *pi = ci_get_pi(rdev);
0457     u32 pm_fuse_table_offset;
0458     int ret;
0459 
0460     if (pi->caps_power_containment) {
0461         ret = ci_read_smc_sram_dword(rdev,
0462                          SMU7_FIRMWARE_HEADER_LOCATION +
0463                          offsetof(SMU7_Firmware_Header, PmFuseTable),
0464                          &pm_fuse_table_offset, pi->sram_end);
0465         if (ret)
0466             return ret;
0467         ret = ci_populate_bapm_vddc_vid_sidd(rdev);
0468         if (ret)
0469             return ret;
0470         ret = ci_populate_vddc_vid(rdev);
0471         if (ret)
0472             return ret;
0473         ret = ci_populate_svi_load_line(rdev);
0474         if (ret)
0475             return ret;
0476         ret = ci_populate_tdc_limit(rdev);
0477         if (ret)
0478             return ret;
0479         ret = ci_populate_dw8(rdev);
0480         if (ret)
0481             return ret;
0482         ret = ci_populate_fuzzy_fan(rdev);
0483         if (ret)
0484             return ret;
0485         ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
0486         if (ret)
0487             return ret;
0488         ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
0489         if (ret)
0490             return ret;
0491         ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
0492                        (u8 *)&pi->smc_powertune_table,
0493                        sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
0494         if (ret)
0495             return ret;
0496     }
0497 
0498     return 0;
0499 }
0500 
0501 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
0502 {
0503     struct ci_power_info *pi = ci_get_pi(rdev);
0504     u32 data;
0505 
0506     if (pi->caps_sq_ramping) {
0507         data = RREG32_DIDT(DIDT_SQ_CTRL0);
0508         if (enable)
0509             data |= DIDT_CTRL_EN;
0510         else
0511             data &= ~DIDT_CTRL_EN;
0512         WREG32_DIDT(DIDT_SQ_CTRL0, data);
0513     }
0514 
0515     if (pi->caps_db_ramping) {
0516         data = RREG32_DIDT(DIDT_DB_CTRL0);
0517         if (enable)
0518             data |= DIDT_CTRL_EN;
0519         else
0520             data &= ~DIDT_CTRL_EN;
0521         WREG32_DIDT(DIDT_DB_CTRL0, data);
0522     }
0523 
0524     if (pi->caps_td_ramping) {
0525         data = RREG32_DIDT(DIDT_TD_CTRL0);
0526         if (enable)
0527             data |= DIDT_CTRL_EN;
0528         else
0529             data &= ~DIDT_CTRL_EN;
0530         WREG32_DIDT(DIDT_TD_CTRL0, data);
0531     }
0532 
0533     if (pi->caps_tcp_ramping) {
0534         data = RREG32_DIDT(DIDT_TCP_CTRL0);
0535         if (enable)
0536             data |= DIDT_CTRL_EN;
0537         else
0538             data &= ~DIDT_CTRL_EN;
0539         WREG32_DIDT(DIDT_TCP_CTRL0, data);
0540     }
0541 }
0542 
0543 static int ci_program_pt_config_registers(struct radeon_device *rdev,
0544                       const struct ci_pt_config_reg *cac_config_regs)
0545 {
0546     const struct ci_pt_config_reg *config_regs = cac_config_regs;
0547     u32 data;
0548     u32 cache = 0;
0549 
0550     if (config_regs == NULL)
0551         return -EINVAL;
0552 
0553     while (config_regs->offset != 0xFFFFFFFF) {
0554         if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
0555             cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
0556         } else {
0557             switch (config_regs->type) {
0558             case CISLANDS_CONFIGREG_SMC_IND:
0559                 data = RREG32_SMC(config_regs->offset);
0560                 break;
0561             case CISLANDS_CONFIGREG_DIDT_IND:
0562                 data = RREG32_DIDT(config_regs->offset);
0563                 break;
0564             default:
0565                 data = RREG32(config_regs->offset << 2);
0566                 break;
0567             }
0568 
0569             data &= ~config_regs->mask;
0570             data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
0571             data |= cache;
0572 
0573             switch (config_regs->type) {
0574             case CISLANDS_CONFIGREG_SMC_IND:
0575                 WREG32_SMC(config_regs->offset, data);
0576                 break;
0577             case CISLANDS_CONFIGREG_DIDT_IND:
0578                 WREG32_DIDT(config_regs->offset, data);
0579                 break;
0580             default:
0581                 WREG32(config_regs->offset << 2, data);
0582                 break;
0583             }
0584             cache = 0;
0585         }
0586         config_regs++;
0587     }
0588     return 0;
0589 }
0590 
0591 static int ci_enable_didt(struct radeon_device *rdev, bool enable)
0592 {
0593     struct ci_power_info *pi = ci_get_pi(rdev);
0594     int ret;
0595 
0596     if (pi->caps_sq_ramping || pi->caps_db_ramping ||
0597         pi->caps_td_ramping || pi->caps_tcp_ramping) {
0598         cik_enter_rlc_safe_mode(rdev);
0599 
0600         if (enable) {
0601             ret = ci_program_pt_config_registers(rdev, didt_config_ci);
0602             if (ret) {
0603                 cik_exit_rlc_safe_mode(rdev);
0604                 return ret;
0605             }
0606         }
0607 
0608         ci_do_enable_didt(rdev, enable);
0609 
0610         cik_exit_rlc_safe_mode(rdev);
0611     }
0612 
0613     return 0;
0614 }
0615 
0616 static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
0617 {
0618     struct ci_power_info *pi = ci_get_pi(rdev);
0619     PPSMC_Result smc_result;
0620     int ret = 0;
0621 
0622     if (enable) {
0623         pi->power_containment_features = 0;
0624         if (pi->caps_power_containment) {
0625             if (pi->enable_bapm_feature) {
0626                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
0627                 if (smc_result != PPSMC_Result_OK)
0628                     ret = -EINVAL;
0629                 else
0630                     pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
0631             }
0632 
0633             if (pi->enable_tdc_limit_feature) {
0634                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
0635                 if (smc_result != PPSMC_Result_OK)
0636                     ret = -EINVAL;
0637                 else
0638                     pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
0639             }
0640 
0641             if (pi->enable_pkg_pwr_tracking_feature) {
0642                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
0643                 if (smc_result != PPSMC_Result_OK) {
0644                     ret = -EINVAL;
0645                 } else {
0646                     struct radeon_cac_tdp_table *cac_tdp_table =
0647                         rdev->pm.dpm.dyn_state.cac_tdp_table;
0648                     u32 default_pwr_limit =
0649                         (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
0650 
0651                     pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
0652 
0653                     ci_set_power_limit(rdev, default_pwr_limit);
0654                 }
0655             }
0656         }
0657     } else {
0658         if (pi->caps_power_containment && pi->power_containment_features) {
0659             if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
0660                 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
0661 
0662             if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
0663                 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
0664 
0665             if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
0666                 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
0667             pi->power_containment_features = 0;
0668         }
0669     }
0670 
0671     return ret;
0672 }
0673 
0674 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
0675 {
0676     struct ci_power_info *pi = ci_get_pi(rdev);
0677     PPSMC_Result smc_result;
0678     int ret = 0;
0679 
0680     if (pi->caps_cac) {
0681         if (enable) {
0682             smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
0683             if (smc_result != PPSMC_Result_OK) {
0684                 ret = -EINVAL;
0685                 pi->cac_enabled = false;
0686             } else {
0687                 pi->cac_enabled = true;
0688             }
0689         } else if (pi->cac_enabled) {
0690             ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
0691             pi->cac_enabled = false;
0692         }
0693     }
0694 
0695     return ret;
0696 }
0697 
0698 static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
0699                         bool enable)
0700 {
0701     struct ci_power_info *pi = ci_get_pi(rdev);
0702     PPSMC_Result smc_result = PPSMC_Result_OK;
0703 
0704     if (pi->thermal_sclk_dpm_enabled) {
0705         if (enable)
0706             smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
0707         else
0708             smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
0709     }
0710 
0711     if (smc_result == PPSMC_Result_OK)
0712         return 0;
0713     else
0714         return -EINVAL;
0715 }
0716 
0717 static int ci_power_control_set_level(struct radeon_device *rdev)
0718 {
0719     struct ci_power_info *pi = ci_get_pi(rdev);
0720     struct radeon_cac_tdp_table *cac_tdp_table =
0721         rdev->pm.dpm.dyn_state.cac_tdp_table;
0722     s32 adjust_percent;
0723     s32 target_tdp;
0724     int ret = 0;
0725     bool adjust_polarity = false; /* ??? */
0726 
0727     if (pi->caps_power_containment) {
0728         adjust_percent = adjust_polarity ?
0729             rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
0730         target_tdp = ((100 + adjust_percent) *
0731                   (s32)cac_tdp_table->configurable_tdp) / 100;
0732 
0733         ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
0734     }
0735 
0736     return ret;
0737 }
0738 
0739 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
0740 {
0741     struct ci_power_info *pi = ci_get_pi(rdev);
0742 
0743     if (pi->uvd_power_gated == gate)
0744         return;
0745 
0746     pi->uvd_power_gated = gate;
0747 
0748     ci_update_uvd_dpm(rdev, gate);
0749 }
0750 
0751 bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
0752 {
0753     struct ci_power_info *pi = ci_get_pi(rdev);
0754     u32 vblank_time = r600_dpm_get_vblank_time(rdev);
0755     u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
0756 
0757     /* disable mclk switching if the refresh is >120Hz, even if the
0758         * blanking period would allow it
0759         */
0760     if (r600_dpm_get_vrefresh(rdev) > 120)
0761         return true;
0762 
0763     if (vblank_time < switch_limit)
0764         return true;
0765     else
0766         return false;
0767 
0768 }
0769 
/**
 * ci_apply_state_adjust_rules - clamp a requested power state to valid limits
 * @rdev: radeon_device pointer
 * @rps: the requested power state, adjusted in place
 *
 * Applies platform constraints before the state is programmed: VCE
 * clock requirements, mclk-switching restrictions, battery
 * classification, and the AC/DC clock/voltage maximums.
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
                    struct radeon_ps *rps)
{
    struct ci_ps *ps = ci_get_ps(rps);
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct radeon_clock_and_voltage_limits *max_limits;
    bool disable_mclk_switching;
    u32 sclk, mclk;
    int i;

    /* pick up the VCE clocks for the current VCE level, or zero them
     * when VCE is idle
     */
    if (rps->vce_active) {
        rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
        rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
    } else {
        rps->evclk = 0;
        rps->ecclk = 0;
    }

    /* mclk switching is unsafe with multiple active crtcs or a short vblank */
    if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
        ci_dpm_vblank_too_short(rdev))
        disable_mclk_switching = true;
    else
        disable_mclk_switching = false;

    if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
        pi->battery_state = true;
    else
        pi->battery_state = false;

    if (rdev->pm.dpm.ac_power)
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
    else
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

    /* on DC power, clamp every performance level to the DC maximums */
    if (rdev->pm.dpm.ac_power == false) {
        for (i = 0; i < ps->performance_level_count; i++) {
            if (ps->performance_levels[i].mclk > max_limits->mclk)
                ps->performance_levels[i].mclk = max_limits->mclk;
            if (ps->performance_levels[i].sclk > max_limits->sclk)
                ps->performance_levels[i].sclk = max_limits->sclk;
        }
    }

    /* XXX validate the min clocks required for display */

    /* with switching disabled, pin mclk at the highest level's value */
    if (disable_mclk_switching) {
        mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
        sclk = ps->performance_levels[0].sclk;
    } else {
        mclk = ps->performance_levels[0].mclk;
        sclk = ps->performance_levels[0].sclk;
    }

    /* VCE imposes minimum engine and memory clocks */
    if (rps->vce_active) {
        if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
            sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
        if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
            mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
    }

    ps->performance_levels[0].sclk = sclk;
    ps->performance_levels[0].mclk = mclk;

    /* keep level 1 clocks >= level 0 clocks */
    if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
        ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

    if (disable_mclk_switching) {
        if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
            ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
    } else {
        if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
            ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
    }
}
0844 
/**
 * ci_thermal_set_temperature_range - program the thermal interrupt thresholds
 * @rdev: radeon_device pointer
 * @min_temp: lower threshold in millidegrees C
 * @max_temp: upper threshold in millidegrees C
 *
 * Clamps the requested range to the 0..255 C window supported by the
 * hardware and programs the (whole-degree) thresholds into
 * CG_THERMAL_INT.  Returns 0 on success, -EINVAL if the clamped range
 * is inverted.
 */
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
                        int min_temp, int max_temp)
{
    int low_temp = 0 * 1000;
    int high_temp = 255 * 1000;
    u32 tmp;

    if (low_temp < min_temp)
        low_temp = min_temp;
    if (high_temp > max_temp)
        high_temp = max_temp;
    if (high_temp < low_temp) {
        DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
        return -EINVAL;
    }

    /* hardware thresholds are in whole degrees C */
    tmp = RREG32_SMC(CG_THERMAL_INT);
    tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
    tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
        CI_DIG_THERM_INTL(low_temp / 1000);
    WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
    /* XXX: need to figure out how to handle this properly */
    tmp = RREG32_SMC(CG_THERMAL_CTRL);
    tmp &= DIG_THERM_DPM_MASK;
    tmp |= DIG_THERM_DPM(high_temp / 1000);
    WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

    /* remember the active range (millidegrees) for the thermal irq handler */
    rdev->pm.dpm.thermal.min_temp = low_temp;
    rdev->pm.dpm.thermal.max_temp = high_temp;

    return 0;
}
0880 
/**
 * ci_thermal_enable_alert - enable or disable thermal interrupts
 * @rdev: radeon_device pointer
 * @enable: true to unmask the high/low thermal interrupts, false to mask
 *
 * Programs the interrupt masks in CG_THERMAL_INT and tells the SMC to
 * enable or disable its thermal controller.  When disabling SMC control,
 * irq.dpm_thermal is set so the driver handles thermal events itself.
 * Returns 0 on success, -EINVAL if the SMC message fails.
 */
static int ci_thermal_enable_alert(struct radeon_device *rdev,
                   bool enable)
{
    u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
    PPSMC_Result result;

    if (enable) {
        /* unmask both thresholds, SMC takes over thermal handling */
        thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
        WREG32_SMC(CG_THERMAL_INT, thermal_int);
        rdev->irq.dpm_thermal = false;
        result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
        if (result != PPSMC_Result_OK) {
            DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
            return -EINVAL;
        }
    } else {
        /* mask both thresholds, driver handles thermal events */
        thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
        WREG32_SMC(CG_THERMAL_INT, thermal_int);
        rdev->irq.dpm_thermal = true;
        result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
        if (result != PPSMC_Result_OK) {
            DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
            return -EINVAL;
        }
    }

    return 0;
}
0909 
/**
 * ci_fan_ctrl_set_static_mode - switch the fan controller to a fixed PWM mode
 * @rdev: radeon_device pointer
 * @mode: FDO PWM mode to program
 *
 * On the first call, saves the current PWM mode and TMIN so that
 * ci_fan_ctrl_set_default_mode() can restore them later, then clears
 * TMIN and programs the requested mode.
 */
static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u32 tmp;

    if (pi->fan_ctrl_is_in_default_mode) {
        /* stash the hardware defaults for a later restore */
        tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
        pi->fan_ctrl_default_mode = tmp;
        tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
        pi->t_min = tmp;
        pi->fan_ctrl_is_in_default_mode = false;
    }

    /* zero TMIN so the static setting takes effect immediately */
    tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
    tmp |= TMIN(0);
    WREG32_SMC(CG_FDO_CTRL2, tmp);

    tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
    tmp |= FDO_PWM_MODE(mode);
    WREG32_SMC(CG_FDO_CTRL2, tmp);
}
0931 
/**
 * ci_thermal_setup_fan_table - build and upload the SMC fan-control table
 * @rdev: radeon_device pointer
 *
 * Converts the fan profile (temperature/PWM points from the vbios) into
 * SMU7_Discrete_FanTable format and copies it into SMC RAM.  On any
 * problem, firmware fan control is disabled and 0 is returned so the
 * driver falls back to default fan handling; this function never fails
 * hard.
 */
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
    u32 duty100;
    u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
    u16 fdo_min, slope1, slope2;
    u32 reference_clock, tmp;
    int ret;
    u64 tmp64;

    if (!pi->fan_table_start) {
        rdev->pm.dpm.fan.ucode_fan_control = false;
        return 0;
    }

    /* duty register value corresponding to 100% fan speed */
    duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

    if (duty100 == 0) {
        rdev->pm.dpm.fan.ucode_fan_control = false;
        return 0;
    }

    /* minimum duty, scaled from pwm_min (given in 0.01% units) */
    tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
    do_div(tmp64, 10000);
    fdo_min = (u16)tmp64;

    /* slopes of the two PWM-vs-temperature segments
     * NOTE(review): assumes t_med > t_min and t_high > t_med (values
     * come from the vbios); a zero delta would divide by zero -- confirm
     * the vbios guarantees this.
     */
    t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
    t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

    pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
    pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

    slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
    slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

    /* table temperatures are in whole degrees, rounded to nearest */
    fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
    fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
    fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

    fan_table.Slope1 = cpu_to_be16(slope1);
    fan_table.Slope2 = cpu_to_be16(slope2);

    fan_table.FdoMin = cpu_to_be16(fdo_min);

    fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

    fan_table.HystUp = cpu_to_be16(1);

    fan_table.HystSlope = cpu_to_be16(1);

    fan_table.TempRespLim = cpu_to_be16(5);

    reference_clock = radeon_get_xclk(rdev);

    /* refresh period expressed in reference-clock ticks */
    fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
                           reference_clock) / 1600);

    fan_table.FdoMax = cpu_to_be16((u16)duty100);

    /* temperature source currently selected by the hardware */
    tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
    fan_table.TempSrc = (uint8_t)tmp;

    ret = ci_copy_bytes_to_smc(rdev,
                   pi->fan_table_start,
                   (u8 *)(&fan_table),
                   sizeof(fan_table),
                   pi->sram_end);

    if (ret) {
        DRM_ERROR("Failed to load fan table to the SMC.");
        rdev->pm.dpm.fan.ucode_fan_control = false;
    }

    return 0;
}
1008 
/**
 * ci_fan_ctrl_start_smc_fan_control - hand fan control over to the SMC
 * @rdev: radeon_device pointer
 *
 * Starts SMC fan control in fuzzy mode (also programming the default
 * maximum PWM) when the overdrive fuzzy-fan capability is present,
 * otherwise in table mode.  On success the fan is marked as SMC-owned.
 * Returns 0 on success, -EINVAL if any SMC message is rejected.
 */
static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result ret;

    if (pi->caps_od_fuzzy_fan_control_support) {
        ret = ci_send_msg_to_smc_with_parameter(rdev,
                            PPSMC_StartFanControl,
                            FAN_CONTROL_FUZZY);
        if (ret != PPSMC_Result_OK)
            return -EINVAL;
        /* fuzzy mode needs the PWM ceiling programmed as well */
        ret = ci_send_msg_to_smc_with_parameter(rdev,
                            PPSMC_MSG_SetFanPwmMax,
                            rdev->pm.dpm.fan.default_max_fan_pwm);
        if (ret != PPSMC_Result_OK)
            return -EINVAL;
    } else {
        ret = ci_send_msg_to_smc_with_parameter(rdev,
                            PPSMC_StartFanControl,
                            FAN_CONTROL_TABLE);
        if (ret != PPSMC_Result_OK)
            return -EINVAL;
    }

    pi->fan_is_controlled_by_smc = true;
    return 0;
}
1036 
1037 static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
1038 {
1039     PPSMC_Result ret;
1040     struct ci_power_info *pi = ci_get_pi(rdev);
1041 
1042     ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
1043     if (ret == PPSMC_Result_OK) {
1044         pi->fan_is_controlled_by_smc = false;
1045         return 0;
1046     } else
1047         return -EINVAL;
1048 }
1049 
1050 int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
1051                          u32 *speed)
1052 {
1053     u32 duty, duty100;
1054     u64 tmp64;
1055 
1056     if (rdev->pm.no_fan)
1057         return -ENOENT;
1058 
1059     duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1060     duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
1061 
1062     if (duty100 == 0)
1063         return -EINVAL;
1064 
1065     tmp64 = (u64)duty * 100;
1066     do_div(tmp64, duty100);
1067     *speed = (u32)tmp64;
1068 
1069     if (*speed > 100)
1070         *speed = 100;
1071 
1072     return 0;
1073 }
1074 
1075 int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
1076                          u32 speed)
1077 {
1078     u32 tmp;
1079     u32 duty, duty100;
1080     u64 tmp64;
1081     struct ci_power_info *pi = ci_get_pi(rdev);
1082 
1083     if (rdev->pm.no_fan)
1084         return -ENOENT;
1085 
1086     if (pi->fan_is_controlled_by_smc)
1087         return -EINVAL;
1088 
1089     if (speed > 100)
1090         return -EINVAL;
1091 
1092     duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1093 
1094     if (duty100 == 0)
1095         return -EINVAL;
1096 
1097     tmp64 = (u64)speed * duty100;
1098     do_div(tmp64, 100);
1099     duty = (u32)tmp64;
1100 
1101     tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
1102     tmp |= FDO_STATIC_DUTY(duty);
1103     WREG32_SMC(CG_FDO_CTRL0, tmp);
1104 
1105     return 0;
1106 }
1107 
1108 void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
1109 {
1110     if (mode) {
1111         /* stop auto-manage */
1112         if (rdev->pm.dpm.fan.ucode_fan_control)
1113             ci_fan_ctrl_stop_smc_fan_control(rdev);
1114         ci_fan_ctrl_set_static_mode(rdev, mode);
1115     } else {
1116         /* restart auto-manage */
1117         if (rdev->pm.dpm.fan.ucode_fan_control)
1118             ci_thermal_start_smc_fan_control(rdev);
1119         else
1120             ci_fan_ctrl_set_default_mode(rdev);
1121     }
1122 }
1123 
1124 u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
1125 {
1126     struct ci_power_info *pi = ci_get_pi(rdev);
1127     u32 tmp;
1128 
1129     if (pi->fan_is_controlled_by_smc)
1130         return 0;
1131 
1132     tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
1133     return (tmp >> FDO_PWM_MODE_SHIFT);
1134 }
1135 
#if 0
/*
 * ci_fan_ctrl_get_fan_speed_rpm - read the current fan speed in RPM
 * (compiled out, currently unused)
 *
 * Derives RPM from the measured tach period.
 * NOTE(review): this conversion does not use fan_pulses_per_revolution
 * while the set function below divides by a hard-coded 8 -- confirm the
 * intended formula before enabling this code.
 */
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
                     u32 *speed)
{
    u32 tach_period;
    u32 xclk = radeon_get_xclk(rdev);

    if (rdev->pm.no_fan)
        return -ENOENT;

    if (rdev->pm.fan_pulses_per_revolution == 0)
        return -ENOENT;

    tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
    if (tach_period == 0)
        return -ENOENT;

    *speed = 60 * xclk * 10000 / tach_period;

    return 0;
}

/*
 * ci_fan_ctrl_set_fan_speed_rpm - force a fan speed in RPM
 * (compiled out, currently unused)
 *
 * Validates the requested speed against the board's RPM limits, stops
 * any SMC fan management, programs the target tach period and switches
 * the controller to static-RPM mode.
 */
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
                     u32 speed)
{
    u32 tach_period, tmp;
    u32 xclk = radeon_get_xclk(rdev);

    if (rdev->pm.no_fan)
        return -ENOENT;

    if (rdev->pm.fan_pulses_per_revolution == 0)
        return -ENOENT;

    if ((speed < rdev->pm.fan_min_rpm) ||
        (speed > rdev->pm.fan_max_rpm))
        return -EINVAL;

    if (rdev->pm.dpm.fan.ucode_fan_control)
        ci_fan_ctrl_stop_smc_fan_control(rdev);

    tach_period = 60 * xclk * 10000 / (8 * speed);
    tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
    tmp |= TARGET_PERIOD(tach_period);
    WREG32_SMC(CG_TACH_CTRL, tmp);

    ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

    return 0;
}
#endif
1187 
1188 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1189 {
1190     struct ci_power_info *pi = ci_get_pi(rdev);
1191     u32 tmp;
1192 
1193     if (!pi->fan_ctrl_is_in_default_mode) {
1194         tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
1195         tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1196         WREG32_SMC(CG_FDO_CTRL2, tmp);
1197 
1198         tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
1199         tmp |= TMIN(pi->t_min);
1200         WREG32_SMC(CG_FDO_CTRL2, tmp);
1201         pi->fan_ctrl_is_in_default_mode = true;
1202     }
1203 }
1204 
1205 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
1206 {
1207     if (rdev->pm.dpm.fan.ucode_fan_control) {
1208         ci_fan_ctrl_start_smc_fan_control(rdev);
1209         ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1210     }
1211 }
1212 
1213 static void ci_thermal_initialize(struct radeon_device *rdev)
1214 {
1215     u32 tmp;
1216 
1217     if (rdev->pm.fan_pulses_per_revolution) {
1218         tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
1219         tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
1220         WREG32_SMC(CG_TACH_CTRL, tmp);
1221     }
1222 
1223     tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
1224     tmp |= TACH_PWM_RESP_RATE(0x28);
1225     WREG32_SMC(CG_FDO_CTRL2, tmp);
1226 }
1227 
/**
 * ci_thermal_start_thermal_controller - bring up thermal management
 * @rdev: radeon_device pointer
 *
 * Initializes the tach/PWM hardware, programs the default temperature
 * range, enables thermal alerts and, when firmware fan control is in
 * use, uploads the fan table and starts SMC fan control.
 * Returns 0 on success or a negative error code.
 */
static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
    int ret;

    ci_thermal_initialize(rdev);
    ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
    if (ret)
        return ret;
    ret = ci_thermal_enable_alert(rdev, true);
    if (ret)
        return ret;
    if (rdev->pm.dpm.fan.ucode_fan_control) {
        /* fan table must be in SMC RAM before fan control starts */
        ret = ci_thermal_setup_fan_table(rdev);
        if (ret)
            return ret;
        ci_thermal_start_smc_fan_control(rdev);
    }

    return 0;
}
1248 
1249 static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
1250 {
1251     if (!rdev->pm.no_fan)
1252         ci_fan_ctrl_set_default_mode(rdev);
1253 }
1254 
#if 0
/* Read a dword from the SMC soft-register area (compiled out, unused). */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
                     u16 reg_offset, u32 *value)
{
    struct ci_power_info *pi = ci_get_pi(rdev);

    return ci_read_smc_sram_dword(rdev,
                      pi->soft_regs_start + reg_offset,
                      value, pi->sram_end);
}
#endif
1266 
/* Write a dword into the SMC soft-register area. */
static int ci_write_smc_soft_register(struct radeon_device *rdev,
                      u16 reg_offset, u32 value)
{
    struct ci_power_info *pi = ci_get_pi(rdev);

    return ci_write_smc_sram_dword(rdev,
                       pi->soft_regs_start + reg_offset,
                       value, pi->sram_end);
}
1276 
1277 static void ci_init_fps_limits(struct radeon_device *rdev)
1278 {
1279     struct ci_power_info *pi = ci_get_pi(rdev);
1280     SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1281 
1282     if (pi->caps_fps) {
1283         u16 tmp;
1284 
1285         tmp = 45;
1286         table->FpsHighT = cpu_to_be16(tmp);
1287 
1288         tmp = 30;
1289         table->FpsLowT = cpu_to_be16(tmp);
1290     }
1291 }
1292 
/**
 * ci_update_sclk_t - upload the low-sclk interrupt threshold to the SMC
 * @rdev: radeon_device pointer
 *
 * When sclk throttle-low notification is enabled, copies the threshold
 * (converted to big-endian for the SMC) into the dpm table in SMC RAM.
 * Returns 0 on success or when the feature is disabled.
 */
static int ci_update_sclk_t(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    int ret = 0;
    u32 low_sclk_interrupt_t = 0;

    if (pi->caps_sclk_throttle_low_notification) {
        /* SMC expects big-endian values */
        low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

        ret = ci_copy_bytes_to_smc(rdev,
                       pi->dpm_table_start +
                       offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
                       (u8 *)&low_sclk_interrupt_t,
                       sizeof(u32), pi->sram_end);

    }

    return ret;
}
1312 
/**
 * ci_get_leakage_voltages - build the leakage-voltage translation tables
 * @rdev: radeon_device pointer
 *
 * Populates pi->vddc_leakage and pi->vddci_leakage with the actual
 * voltages that correspond to the virtual leakage-voltage ids, using
 * either EVV (when the platform supports it) or the vbios leakage
 * tables.  Entries whose voltage is zero or equal to the virtual id
 * are skipped.
 */
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u16 leakage_id, virtual_voltage_id;
    u16 vddc, vddci;
    int i;

    pi->vddc_leakage.count = 0;
    pi->vddci_leakage.count = 0;

    if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
        /* EVV path: only vddc translations are available */
        for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
            virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
            if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
                continue;
            if (vddc != 0 && vddc != virtual_voltage_id) {
                pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
                pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
                pi->vddc_leakage.count++;
            }
        }
    } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
        /* vbios path: both vddc and vddci translations */
        for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
            virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
            if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
                                         virtual_voltage_id,
                                         leakage_id) == 0) {
                if (vddc != 0 && vddc != virtual_voltage_id) {
                    pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
                    pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
                    pi->vddc_leakage.count++;
                }
                if (vddci != 0 && vddci != virtual_voltage_id) {
                    pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
                    pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
                    pi->vddci_leakage.count++;
                }
            }
        }
    }
}
1354 
1355 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1356 {
1357     struct ci_power_info *pi = ci_get_pi(rdev);
1358     bool want_thermal_protection;
1359     u32 tmp;
1360 
1361     switch (sources) {
1362     case 0:
1363     default:
1364         want_thermal_protection = false;
1365         break;
1366     case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1367         want_thermal_protection = true;
1368         break;
1369     case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1370         want_thermal_protection = true;
1371         break;
1372     case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1373           (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1374         want_thermal_protection = true;
1375         break;
1376     }
1377 
1378     if (want_thermal_protection) {
1379         tmp = RREG32_SMC(GENERAL_PWRMGT);
1380         if (pi->thermal_protection)
1381             tmp &= ~THERMAL_PROTECTION_DIS;
1382         else
1383             tmp |= THERMAL_PROTECTION_DIS;
1384         WREG32_SMC(GENERAL_PWRMGT, tmp);
1385     } else {
1386         tmp = RREG32_SMC(GENERAL_PWRMGT);
1387         tmp |= THERMAL_PROTECTION_DIS;
1388         WREG32_SMC(GENERAL_PWRMGT, tmp);
1389     }
1390 }
1391 
1392 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1393                        enum radeon_dpm_auto_throttle_src source,
1394                        bool enable)
1395 {
1396     struct ci_power_info *pi = ci_get_pi(rdev);
1397 
1398     if (enable) {
1399         if (!(pi->active_auto_throttle_sources & (1 << source))) {
1400             pi->active_auto_throttle_sources |= 1 << source;
1401             ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1402         }
1403     } else {
1404         if (pi->active_auto_throttle_sources & (1 << source)) {
1405             pi->active_auto_throttle_sources &= ~(1 << source);
1406             ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1407         }
1408     }
1409 }
1410 
/* Enable the VR-hot GPIO interrupt in the SMC when the platform supports it. */
static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
    if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
        ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}
1416 
/**
 * ci_unfreeze_sclk_mclk_dpm - unfreeze DPM levels after a table update
 * @rdev: radeon_device pointer
 *
 * If a smu7 dpm table update is pending, asks the SMC to unfreeze the
 * sclk and/or mclk levels that were frozen for the update, then clears
 * the pending-update flags.  Returns 0 on success, -EINVAL on SMC
 * failure.
 */
static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result smc_result;

    if (!pi->need_update_smu7_dpm_table)
        return 0;

    if ((!pi->sclk_dpm_key_disabled) &&
        (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
        if (smc_result != PPSMC_Result_OK)
            return -EINVAL;
    }

    if ((!pi->mclk_dpm_key_disabled) &&
        (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
        if (smc_result != PPSMC_Result_OK)
            return -EINVAL;
    }

    pi->need_update_smu7_dpm_table = 0;
    return 0;
}
1442 
/**
 * ci_enable_sclk_mclk_dpm - enable or disable sclk/mclk DPM in the SMC
 * @rdev: radeon_device pointer
 * @enable: true to enable, false to disable
 *
 * Sends the DPM enable/disable messages for each domain whose DPM key
 * is not disabled.  When enabling mclk DPM it also turns on memory CAC
 * and performs the LCAC programming sequence.
 * Returns 0 on success, -EINVAL if an SMC message fails.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result smc_result;

    if (enable) {
        if (!pi->sclk_dpm_key_disabled) {
            smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
            if (smc_result != PPSMC_Result_OK)
                return -EINVAL;
        }

        if (!pi->mclk_dpm_key_disabled) {
            smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
            if (smc_result != PPSMC_Result_OK)
                return -EINVAL;

            WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

            /* LCAC bring-up sequence; values are hardware magic.
             * NOTE(review): exact register semantics are undocumented
             * here -- preserve the write order and the delay.
             */
            WREG32_SMC(LCAC_MC0_CNTL, 0x05);
            WREG32_SMC(LCAC_MC1_CNTL, 0x05);
            WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

            udelay(10);

            WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
            WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
            WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
        }
    } else {
        if (!pi->sclk_dpm_key_disabled) {
            smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
            if (smc_result != PPSMC_Result_OK)
                return -EINVAL;
        }

        if (!pi->mclk_dpm_key_disabled) {
            smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
            if (smc_result != PPSMC_Result_OK)
                return -EINVAL;
        }
    }

    return 0;
}
1488 
/**
 * ci_start_dpm - enable dynamic power management
 * @rdev: radeon_device pointer
 *
 * Sets the global and sclk power-management enables, programs the
 * voltage-change timeout soft register, clears the BIF lane-count
 * reset, enables voltage control and sclk/mclk DPM, then pcie DPM when
 * allowed.  Returns 0 on success or a negative error code.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result smc_result;
    int ret;
    u32 tmp;

    tmp = RREG32_SMC(GENERAL_PWRMGT);
    tmp |= GLOBAL_PWRMGT_EN;
    WREG32_SMC(GENERAL_PWRMGT, tmp);

    tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
    tmp |= DYNAMIC_PM_EN;
    WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

    ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

    WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

    smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
    if (smc_result != PPSMC_Result_OK)
        return -EINVAL;

    ret = ci_enable_sclk_mclk_dpm(rdev, true);
    if (ret)
        return ret;

    if (!pi->pcie_dpm_key_disabled) {
        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
        if (smc_result != PPSMC_Result_OK)
            return -EINVAL;
    }

    return 0;
}
1524 
/**
 * ci_freeze_sclk_mclk_dpm - freeze DPM levels before a table update
 * @rdev: radeon_device pointer
 *
 * Counterpart of ci_unfreeze_sclk_mclk_dpm(): if a smu7 dpm table
 * update is pending, asks the SMC to freeze the affected sclk/mclk
 * levels so the table can be rewritten safely.  Returns 0 on success,
 * -EINVAL on SMC failure.
 */
static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result smc_result;

    if (!pi->need_update_smu7_dpm_table)
        return 0;

    if ((!pi->sclk_dpm_key_disabled) &&
        (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
        if (smc_result != PPSMC_Result_OK)
            return -EINVAL;
    }

    if ((!pi->mclk_dpm_key_disabled) &&
        (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
        if (smc_result != PPSMC_Result_OK)
            return -EINVAL;
    }

    return 0;
}
1549 
/**
 * ci_stop_dpm - disable dynamic power management
 * @rdev: radeon_device pointer
 *
 * Reverse of ci_start_dpm(): clears the global/sclk PM enables and
 * shuts down pcie DPM, sclk/mclk DPM, and voltage control in the SMC.
 * Returns 0 on success or a negative error code.
 */
static int ci_stop_dpm(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result smc_result;
    int ret;
    u32 tmp;

    tmp = RREG32_SMC(GENERAL_PWRMGT);
    tmp &= ~GLOBAL_PWRMGT_EN;
    WREG32_SMC(GENERAL_PWRMGT, tmp);

    tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
    tmp &= ~DYNAMIC_PM_EN;
    WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

    if (!pi->pcie_dpm_key_disabled) {
        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
        if (smc_result != PPSMC_Result_OK)
            return -EINVAL;
    }

    ret = ci_enable_sclk_mclk_dpm(rdev, false);
    if (ret)
        return ret;

    smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
    if (smc_result != PPSMC_Result_OK)
        return -EINVAL;

    return 0;
}
1581 
1582 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1583 {
1584     u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1585 
1586     if (enable)
1587         tmp &= ~SCLK_PWRMGT_OFF;
1588     else
1589         tmp |= SCLK_PWRMGT_OFF;
1590     WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1591 }
1592 
#if 0
/*
 * Notify the hardware of an AC<->DC power-source change (compiled out,
 * currently unused).  Applies the matching power limit from the CAC TDP
 * table (in 1/256 W units) and, when automatic DC transitions are
 * supported, tells the SMC which source is active.
 */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
                    bool ac_power)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct radeon_cac_tdp_table *cac_tdp_table =
        rdev->pm.dpm.dyn_state.cac_tdp_table;
    u32 power_limit;

    if (ac_power)
        power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
    else
        power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

    ci_set_power_limit(rdev, power_limit);

    if (pi->caps_automatic_dc_transition) {
        if (ac_power)
            ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
        else
            ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
    }

    return 0;
}
#endif
1619 
/**
 * ci_send_msg_to_smc - send a message to the SMC and wait for a reply
 * @rdev: radeon_device pointer
 * @msg: the PPSMC message id
 *
 * Writes the message register and polls the response register for up
 * to rdev->usec_timeout microseconds.  Returns the SMC response code,
 * PPSMC_Result_Failed when the SMC is not running, or the final (zero)
 * response value on timeout.
 */
static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
    u32 tmp;
    int i;

    if (!ci_is_smc_running(rdev))
        return PPSMC_Result_Failed;

    WREG32(SMC_MESSAGE_0, msg);

    /* poll until the SMC posts a non-zero response */
    for (i = 0; i < rdev->usec_timeout; i++) {
        tmp = RREG32(SMC_RESP_0);
        if (tmp != 0)
            break;
        udelay(1);
    }
    tmp = RREG32(SMC_RESP_0);

    return (PPSMC_Result)tmp;
}
1640 
/* Send an SMC message with a single u32 argument. */
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
                              PPSMC_Msg msg, u32 parameter)
{
    /* the argument register must be written before the message */
    WREG32(SMC_MSG_ARG_0, parameter);
    return ci_send_msg_to_smc(rdev, msg);
}
1647 
1648 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1649                             PPSMC_Msg msg, u32 *parameter)
1650 {
1651     PPSMC_Result smc_result;
1652 
1653     smc_result = ci_send_msg_to_smc(rdev, msg);
1654 
1655     if ((smc_result == PPSMC_Result_OK) && parameter)
1656         *parameter = RREG32(SMC_MSG_ARG_0);
1657 
1658     return smc_result;
1659 }
1660 
1661 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1662 {
1663     struct ci_power_info *pi = ci_get_pi(rdev);
1664 
1665     if (!pi->sclk_dpm_key_disabled) {
1666         PPSMC_Result smc_result =
1667             ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1668         if (smc_result != PPSMC_Result_OK)
1669             return -EINVAL;
1670     }
1671 
1672     return 0;
1673 }
1674 
1675 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1676 {
1677     struct ci_power_info *pi = ci_get_pi(rdev);
1678 
1679     if (!pi->mclk_dpm_key_disabled) {
1680         PPSMC_Result smc_result =
1681             ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1682         if (smc_result != PPSMC_Result_OK)
1683             return -EINVAL;
1684     }
1685 
1686     return 0;
1687 }
1688 
1689 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1690 {
1691     struct ci_power_info *pi = ci_get_pi(rdev);
1692 
1693     if (!pi->pcie_dpm_key_disabled) {
1694         PPSMC_Result smc_result =
1695             ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1696         if (smc_result != PPSMC_Result_OK)
1697             return -EINVAL;
1698     }
1699 
1700     return 0;
1701 }
1702 
1703 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1704 {
1705     struct ci_power_info *pi = ci_get_pi(rdev);
1706 
1707     if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1708         PPSMC_Result smc_result =
1709             ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1710         if (smc_result != PPSMC_Result_OK)
1711             return -EINVAL;
1712     }
1713 
1714     return 0;
1715 }
1716 
1717 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1718                        u32 target_tdp)
1719 {
1720     PPSMC_Result smc_result =
1721         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1722     if (smc_result != PPSMC_Result_OK)
1723         return -EINVAL;
1724     return 0;
1725 }
1726 
#if 0
/* Currently unused: drop back to the boot state by disabling sclk/mclk DPM. */
static int ci_set_boot_state(struct radeon_device *rdev)
{
    return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif
1733 
1734 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1735 {
1736     u32 sclk_freq;
1737     PPSMC_Result smc_result =
1738         ci_send_msg_to_smc_return_parameter(rdev,
1739                             PPSMC_MSG_API_GetSclkFrequency,
1740                             &sclk_freq);
1741     if (smc_result != PPSMC_Result_OK)
1742         sclk_freq = 0;
1743 
1744     return sclk_freq;
1745 }
1746 
1747 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1748 {
1749     u32 mclk_freq;
1750     PPSMC_Result smc_result =
1751         ci_send_msg_to_smc_return_parameter(rdev,
1752                             PPSMC_MSG_API_GetMclkFrequency,
1753                             &mclk_freq);
1754     if (smc_result != PPSMC_Result_OK)
1755         mclk_freq = 0;
1756 
1757     return mclk_freq;
1758 }
1759 
1760 static void ci_dpm_start_smc(struct radeon_device *rdev)
1761 {
1762     int i;
1763 
1764     ci_program_jump_on_start(rdev);
1765     ci_start_smc_clock(rdev);
1766     ci_start_smc(rdev);
1767     for (i = 0; i < rdev->usec_timeout; i++) {
1768         if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1769             break;
1770     }
1771 }
1772 
/* Halt the SMC: put it into reset, then gate its clock. */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
    ci_reset_smc(rdev);
    ci_stop_smc_clock(rdev);
}
1778 
1779 static int ci_process_firmware_header(struct radeon_device *rdev)
1780 {
1781     struct ci_power_info *pi = ci_get_pi(rdev);
1782     u32 tmp;
1783     int ret;
1784 
1785     ret = ci_read_smc_sram_dword(rdev,
1786                      SMU7_FIRMWARE_HEADER_LOCATION +
1787                      offsetof(SMU7_Firmware_Header, DpmTable),
1788                      &tmp, pi->sram_end);
1789     if (ret)
1790         return ret;
1791 
1792     pi->dpm_table_start = tmp;
1793 
1794     ret = ci_read_smc_sram_dword(rdev,
1795                      SMU7_FIRMWARE_HEADER_LOCATION +
1796                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1797                      &tmp, pi->sram_end);
1798     if (ret)
1799         return ret;
1800 
1801     pi->soft_regs_start = tmp;
1802 
1803     ret = ci_read_smc_sram_dword(rdev,
1804                      SMU7_FIRMWARE_HEADER_LOCATION +
1805                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1806                      &tmp, pi->sram_end);
1807     if (ret)
1808         return ret;
1809 
1810     pi->mc_reg_table_start = tmp;
1811 
1812     ret = ci_read_smc_sram_dword(rdev,
1813                      SMU7_FIRMWARE_HEADER_LOCATION +
1814                      offsetof(SMU7_Firmware_Header, FanTable),
1815                      &tmp, pi->sram_end);
1816     if (ret)
1817         return ret;
1818 
1819     pi->fan_table_start = tmp;
1820 
1821     ret = ci_read_smc_sram_dword(rdev,
1822                      SMU7_FIRMWARE_HEADER_LOCATION +
1823                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1824                      &tmp, pi->sram_end);
1825     if (ret)
1826         return ret;
1827 
1828     pi->arb_table_start = tmp;
1829 
1830     return 0;
1831 }
1832 
/*
 * Snapshot the current SPLL and MPLL related registers into
 * pi->clock_registers.  SPLL registers live in the SMC register space;
 * the MPLL/DLL registers are plain MMIO.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);

    pi->clock_registers.cg_spll_func_cntl =
        RREG32_SMC(CG_SPLL_FUNC_CNTL);
    pi->clock_registers.cg_spll_func_cntl_2 =
        RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
    pi->clock_registers.cg_spll_func_cntl_3 =
        RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
    pi->clock_registers.cg_spll_func_cntl_4 =
        RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
    pi->clock_registers.cg_spll_spread_spectrum =
        RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
    pi->clock_registers.cg_spll_spread_spectrum_2 =
        RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
    pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
    pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
    pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
    pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
    pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
    pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
    pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
    pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
    pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1859 
/* Reset the cached low-sclk interrupt threshold to zero. */
static void ci_init_sclk_t(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);

    pi->low_sclk_interrupt_t = 0;
}
1866 
1867 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1868                      bool enable)
1869 {
1870     u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1871 
1872     if (enable)
1873         tmp &= ~THERMAL_PROTECTION_DIS;
1874     else
1875         tmp |= THERMAL_PROTECTION_DIS;
1876     WREG32_SMC(GENERAL_PWRMGT, tmp);
1877 }
1878 
1879 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1880 {
1881     u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1882 
1883     tmp |= STATIC_PM_EN;
1884 
1885     WREG32_SMC(GENERAL_PWRMGT, tmp);
1886 }
1887 
#if 0
/* Currently unused: ask the SMC to switch to its minimum-power state. */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

    WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

    /* NOTE(review): udelay() with values this large (25ms) is usually
     * frowned upon (mdelay territory) — harmless while compiled out. */
    udelay(25000);

    return 0;
}

/* Currently unused: resume from minimum power and wait for the SMC ack. */
static int ci_exit_ulp_state(struct radeon_device *rdev)
{
    int i;

    WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

    udelay(7000);

    /* Poll, bounded by usec_timeout, until SMC_RESP_0 reads 1. */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(SMC_RESP_0) == 1)
            break;
        udelay(1000);
    }

    return 0;
}
#endif
1916 
1917 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1918                     bool has_display)
1919 {
1920     PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1921 
1922     return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1923 }
1924 
1925 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1926                       bool enable)
1927 {
1928     struct ci_power_info *pi = ci_get_pi(rdev);
1929 
1930     if (enable) {
1931         if (pi->caps_sclk_ds) {
1932             if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1933                 return -EINVAL;
1934         } else {
1935             if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1936                 return -EINVAL;
1937         }
1938     } else {
1939         if (pi->caps_sclk_ds) {
1940             if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1941                 return -EINVAL;
1942         }
1943     }
1944 
1945     return 0;
1946 }
1947 
/*
 * Program the display-gap control registers and pass the frame timing
 * (pre-vblank gap, vblank timeout) to the SMC soft registers.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
    u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
    u32 pre_vbi_time_in_us;
    u32 frame_time_in_us;
    u32 ref_clock = rdev->clock.spll.reference_freq;
    u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
    u32 vblank_time = r600_dpm_get_vblank_time(rdev);

    /* Use the vblank/watermark gap only when at least one CRTC is active. */
    tmp &= ~DISP_GAP_MASK;
    if (rdev->pm.dpm.new_active_crtc_count > 0)
        tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
    else
        tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
    WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

    /* Fall back to sane defaults when the display timing is unknown. */
    if (refresh_rate == 0)
        refresh_rate = 60;
    if (vblank_time == 0xffffffff)
        vblank_time = 500;
    frame_time_in_us = 1000000 / refresh_rate;
    /*
     * NOTE(review): this u32 subtraction can wrap if vblank_time exceeds
     * frame_time_in_us - 200 — confirm callers guarantee it cannot.
     */
    pre_vbi_time_in_us =
        frame_time_in_us - 200 - vblank_time;
    tmp = pre_vbi_time_in_us * (ref_clock / 100);

    WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
    ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
    ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


    /* Notify the SMC whether exactly one display is active. */
    ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
1981 
1982 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1983 {
1984     struct ci_power_info *pi = ci_get_pi(rdev);
1985     u32 tmp;
1986 
1987     if (enable) {
1988         if (pi->caps_sclk_ss_support) {
1989             tmp = RREG32_SMC(GENERAL_PWRMGT);
1990             tmp |= DYN_SPREAD_SPECTRUM_EN;
1991             WREG32_SMC(GENERAL_PWRMGT, tmp);
1992         }
1993     } else {
1994         tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1995         tmp &= ~SSEN;
1996         WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1997 
1998         tmp = RREG32_SMC(GENERAL_PWRMGT);
1999         tmp &= ~DYN_SPREAD_SPECTRUM_EN;
2000         WREG32_SMC(GENERAL_PWRMGT, tmp);
2001     }
2002 }
2003 
/* Program CG_SSP with the default r600 SST/SSTU values. */
static void ci_program_sstp(struct radeon_device *rdev)
{
    WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
2008 
2009 static void ci_enable_display_gap(struct radeon_device *rdev)
2010 {
2011     u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2012 
2013     tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2014     tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2015         DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2016 
2017     WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2018 }
2019 
/*
 * Release the sclk/busy counters from reset and load the CI default
 * values into the CG_FTV_0..7 threshold registers.
 */
static void ci_program_vc(struct radeon_device *rdev)
{
    u32 tmp;

    tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
    tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
    WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

    WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
    WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
    WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
    WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
    WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
    WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
    WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
    WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}
2037 
/*
 * Inverse of ci_program_vc(): hold the sclk/busy counters in reset and
 * zero all CG_FTV_0..7 threshold registers.
 */
static void ci_clear_vc(struct radeon_device *rdev)
{
    u32 tmp;

    tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
    tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
    WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

    WREG32_SMC(CG_FTV_0, 0);
    WREG32_SMC(CG_FTV_1, 0);
    WREG32_SMC(CG_FTV_2, 0);
    WREG32_SMC(CG_FTV_3, 0);
    WREG32_SMC(CG_FTV_4, 0);
    WREG32_SMC(CG_FTV_5, 0);
    WREG32_SMC(CG_FTV_6, 0);
    WREG32_SMC(CG_FTV_7, 0);
}
2055 
2056 static int ci_upload_firmware(struct radeon_device *rdev)
2057 {
2058     struct ci_power_info *pi = ci_get_pi(rdev);
2059     int i, ret;
2060 
2061     for (i = 0; i < rdev->usec_timeout; i++) {
2062         if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2063             break;
2064     }
2065     WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2066 
2067     ci_stop_smc_clock(rdev);
2068     ci_reset_smc(rdev);
2069 
2070     ret = ci_load_smc_ucode(rdev, pi->sram_end);
2071 
2072     return ret;
2073 
2074 }
2075 
2076 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2077                      struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2078                      struct atom_voltage_table *voltage_table)
2079 {
2080     u32 i;
2081 
2082     if (voltage_dependency_table == NULL)
2083         return -EINVAL;
2084 
2085     voltage_table->mask_low = 0;
2086     voltage_table->phase_delay = 0;
2087 
2088     voltage_table->count = voltage_dependency_table->count;
2089     for (i = 0; i < voltage_table->count; i++) {
2090         voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2091         voltage_table->entries[i].smio_low = 0;
2092     }
2093 
2094     return 0;
2095 }
2096 
2097 static int ci_construct_voltage_tables(struct radeon_device *rdev)
2098 {
2099     struct ci_power_info *pi = ci_get_pi(rdev);
2100     int ret;
2101 
2102     if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2103         ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2104                             VOLTAGE_OBJ_GPIO_LUT,
2105                             &pi->vddc_voltage_table);
2106         if (ret)
2107             return ret;
2108     } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2109         ret = ci_get_svi2_voltage_table(rdev,
2110                         &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2111                         &pi->vddc_voltage_table);
2112         if (ret)
2113             return ret;
2114     }
2115 
2116     if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2117         si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2118                              &pi->vddc_voltage_table);
2119 
2120     if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2121         ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2122                             VOLTAGE_OBJ_GPIO_LUT,
2123                             &pi->vddci_voltage_table);
2124         if (ret)
2125             return ret;
2126     } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2127         ret = ci_get_svi2_voltage_table(rdev,
2128                         &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2129                         &pi->vddci_voltage_table);
2130         if (ret)
2131             return ret;
2132     }
2133 
2134     if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2135         si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2136                              &pi->vddci_voltage_table);
2137 
2138     if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2139         ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2140                             VOLTAGE_OBJ_GPIO_LUT,
2141                             &pi->mvdd_voltage_table);
2142         if (ret)
2143             return ret;
2144     } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2145         ret = ci_get_svi2_voltage_table(rdev,
2146                         &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2147                         &pi->mvdd_voltage_table);
2148         if (ret)
2149             return ret;
2150     }
2151 
2152     if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2153         si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2154                              &pi->mvdd_voltage_table);
2155 
2156     return 0;
2157 }
2158 
/*
 * Fill one SMC voltage level from an atom voltage table entry.  The
 * voltage and SIDD fields end up in big-endian, as the SMC expects.
 */
static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
                      struct atom_voltage_table_entry *voltage_table,
                      SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
    int ret;

    ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
                        &smc_voltage_table->StdVoltageHiSidd,
                        &smc_voltage_table->StdVoltageLoSidd);

    /* No leakage data: fall back to the scaled raw voltage for both. */
    if (ret) {
        smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
        smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
    }

    smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
    /* Convert the (so far CPU-endian) SIDD values in place. */
    smc_voltage_table->StdVoltageHiSidd =
        cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
    smc_voltage_table->StdVoltageLoSidd =
        cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}
2180 
2181 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2182                       SMU7_Discrete_DpmTable *table)
2183 {
2184     struct ci_power_info *pi = ci_get_pi(rdev);
2185     unsigned int count;
2186 
2187     table->VddcLevelCount = pi->vddc_voltage_table.count;
2188     for (count = 0; count < table->VddcLevelCount; count++) {
2189         ci_populate_smc_voltage_table(rdev,
2190                           &pi->vddc_voltage_table.entries[count],
2191                           &table->VddcLevel[count]);
2192 
2193         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2194             table->VddcLevel[count].Smio |=
2195                 pi->vddc_voltage_table.entries[count].smio_low;
2196         else
2197             table->VddcLevel[count].Smio = 0;
2198     }
2199     table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2200 
2201     return 0;
2202 }
2203 
2204 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2205                        SMU7_Discrete_DpmTable *table)
2206 {
2207     unsigned int count;
2208     struct ci_power_info *pi = ci_get_pi(rdev);
2209 
2210     table->VddciLevelCount = pi->vddci_voltage_table.count;
2211     for (count = 0; count < table->VddciLevelCount; count++) {
2212         ci_populate_smc_voltage_table(rdev,
2213                           &pi->vddci_voltage_table.entries[count],
2214                           &table->VddciLevel[count]);
2215 
2216         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2217             table->VddciLevel[count].Smio |=
2218                 pi->vddci_voltage_table.entries[count].smio_low;
2219         else
2220             table->VddciLevel[count].Smio = 0;
2221     }
2222     table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2223 
2224     return 0;
2225 }
2226 
2227 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2228                       SMU7_Discrete_DpmTable *table)
2229 {
2230     struct ci_power_info *pi = ci_get_pi(rdev);
2231     unsigned int count;
2232 
2233     table->MvddLevelCount = pi->mvdd_voltage_table.count;
2234     for (count = 0; count < table->MvddLevelCount; count++) {
2235         ci_populate_smc_voltage_table(rdev,
2236                           &pi->mvdd_voltage_table.entries[count],
2237                           &table->MvddLevel[count]);
2238 
2239         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2240             table->MvddLevel[count].Smio |=
2241                 pi->mvdd_voltage_table.entries[count].smio_low;
2242         else
2243             table->MvddLevel[count].Smio = 0;
2244     }
2245     table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2246 
2247     return 0;
2248 }
2249 
2250 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2251                       SMU7_Discrete_DpmTable *table)
2252 {
2253     int ret;
2254 
2255     ret = ci_populate_smc_vddc_table(rdev, table);
2256     if (ret)
2257         return ret;
2258 
2259     ret = ci_populate_smc_vddci_table(rdev, table);
2260     if (ret)
2261         return ret;
2262 
2263     ret = ci_populate_smc_mvdd_table(rdev, table);
2264     if (ret)
2265         return ret;
2266 
2267     return 0;
2268 }
2269 
/*
 * Look up the MVDD voltage for @mclk from the mvdd-vs-mclk dependency
 * table and write it into @voltage.
 *
 * NOTE(review): this function returns -EINVAL even when a matching
 * entry was found and voltage->Voltage was written — there is no
 * success path that returns 0.  That looks like a bug, but callers may
 * depend on the current behavior; verify against all call sites before
 * changing the final return value.
 */
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
                  SMU7_Discrete_VoltageLevel *voltage)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u32 i = 0;

    if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
        /* First dependency entry whose clock covers mclk wins. */
        for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
            if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
                voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
                break;
            }
        }

        /* mclk above every table entry: no usable MVDD value. */
        if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
            return -EINVAL;
    }

    return -EINVAL;
}
2290 
/*
 * Derive the "standard" hi/lo SIDD voltages for @voltage_table from the
 * CAC leakage table.  Both outputs default to the raw voltage scaled by
 * VOLTAGE_SCALE and are overwritten when leakage data is found.
 *
 * Lookup is two-pass over the vddc-vs-sclk dependency table: first an
 * exact voltage match, then (if none) the first entry at or above the
 * requested voltage.  The leakage index is clamped to the last leakage
 * entry.
 *
 * Returns 0 on success (even with no match — the defaults stand), or
 * -EINVAL when there is no vddc-vs-sclk dependency table at all.
 */
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
                     struct atom_voltage_table_entry *voltage_table,
                     u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
    u16 v_index, idx;
    bool voltage_found = false;
    *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
    *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
        return -EINVAL;

    if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
        /* Pass 1: exact voltage match. */
        for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
            if (voltage_table->value ==
                rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
                voltage_found = true;
                /* Clamp the index into the leakage table. */
                if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
                    idx = v_index;
                else
                    idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
                *std_voltage_lo_sidd =
                    rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
                *std_voltage_hi_sidd =
                    rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
                break;
            }
        }

        /* Pass 2: first entry at or above the requested voltage. */
        if (!voltage_found) {
            for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
                if (voltage_table->value <=
                    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
                    voltage_found = true;
                    if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
                        idx = v_index;
                    else
                        idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
                    *std_voltage_lo_sidd =
                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
                    *std_voltage_hi_sidd =
                        rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
                    break;
                }
            }
        }
    }

    return 0;
}
2341 
2342 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2343                           const struct radeon_phase_shedding_limits_table *limits,
2344                           u32 sclk,
2345                           u32 *phase_shedding)
2346 {
2347     unsigned int i;
2348 
2349     *phase_shedding = 1;
2350 
2351     for (i = 0; i < limits->count; i++) {
2352         if (sclk < limits->entries[i].sclk) {
2353             *phase_shedding = i;
2354             break;
2355         }
2356     }
2357 }
2358 
2359 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2360                           const struct radeon_phase_shedding_limits_table *limits,
2361                           u32 mclk,
2362                           u32 *phase_shedding)
2363 {
2364     unsigned int i;
2365 
2366     *phase_shedding = 1;
2367 
2368     for (i = 0; i < limits->count; i++) {
2369         if (mclk < limits->entries[i].mclk) {
2370             *phase_shedding = i;
2371             break;
2372         }
2373     }
2374 }
2375 
2376 static int ci_init_arb_table_index(struct radeon_device *rdev)
2377 {
2378     struct ci_power_info *pi = ci_get_pi(rdev);
2379     u32 tmp;
2380     int ret;
2381 
2382     ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2383                      &tmp, pi->sram_end);
2384     if (ret)
2385         return ret;
2386 
2387     tmp &= 0x00FFFFFF;
2388     tmp |= MC_CG_ARB_FREQ_F1 << 24;
2389 
2390     return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2391                        tmp, pi->sram_end);
2392 }
2393 
2394 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2395                      struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2396                      u32 clock, u32 *voltage)
2397 {
2398     u32 i = 0;
2399 
2400     if (allowed_clock_voltage_table->count == 0)
2401         return -EINVAL;
2402 
2403     for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2404         if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2405             *voltage = allowed_clock_voltage_table->entries[i].v;
2406             return 0;
2407         }
2408     }
2409 
2410     *voltage = allowed_clock_voltage_table->entries[i-1].v;
2411 
2412     return 0;
2413 }
2414 
2415 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2416                          u32 sclk, u32 min_sclk_in_sr)
2417 {
2418     u32 i;
2419     u32 tmp;
2420     u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2421         min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2422 
2423     if (sclk < min)
2424         return 0;
2425 
2426     for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2427         tmp = sclk / (1 << i);
2428         if (tmp >= min || i == 0)
2429             break;
2430     }
2431 
2432     return (u8)i;
2433 }
2434 
/* Copy MC arb register set F0 into F1 and switch the MC to F1. */
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
    return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
2439 
2440 static int ci_reset_to_default(struct radeon_device *rdev)
2441 {
2442     return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2443         0 : -EINVAL;
2444 }
2445 
2446 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2447 {
2448     u32 tmp;
2449 
2450     tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2451 
2452     if (tmp == MC_CG_ARB_FREQ_F0)
2453         return 0;
2454 
2455     return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2456 }
2457 
2458 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2459                     const u32 engine_clock,
2460                     const u32 memory_clock,
2461                     u32 *dram_timimg2)
2462 {
2463     bool patch;
2464     u32 tmp, tmp2;
2465 
2466     tmp = RREG32(MC_SEQ_MISC0);
2467     patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2468 
2469     if (patch &&
2470         ((rdev->pdev->device == 0x67B0) ||
2471          (rdev->pdev->device == 0x67B1))) {
2472         if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2473             tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2474             *dram_timimg2 &= ~0x00ff0000;
2475             *dram_timimg2 |= tmp2 << 16;
2476         } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2477             tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2478             *dram_timimg2 &= ~0x00ff0000;
2479             *dram_timimg2 |= tmp2 << 16;
2480         }
2481     }
2482 }
2483 
2484 
2485 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2486                         u32 sclk,
2487                         u32 mclk,
2488                         SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2489 {
2490     u32 dram_timing;
2491     u32 dram_timing2;
2492     u32 burst_time;
2493 
2494     radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2495 
2496     dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2497     dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2498     burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2499 
2500     ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2501 
2502     arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2503     arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2504     arb_regs->McArbBurstTime = (u8)burst_time;
2505 
2506     return 0;
2507 }
2508 
2509 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2510 {
2511     struct ci_power_info *pi = ci_get_pi(rdev);
2512     SMU7_Discrete_MCArbDramTimingTable arb_regs;
2513     u32 i, j;
2514     int ret =  0;
2515 
2516     memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2517 
2518     for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2519         for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2520             ret = ci_populate_memory_timing_parameters(rdev,
2521                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2522                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2523                                    &arb_regs.entries[i][j]);
2524             if (ret)
2525                 break;
2526         }
2527     }
2528 
2529     if (ret == 0)
2530         ret = ci_copy_bytes_to_smc(rdev,
2531                        pi->arb_table_start,
2532                        (u8 *)&arb_regs,
2533                        sizeof(SMU7_Discrete_MCArbDramTimingTable),
2534                        pi->sram_end);
2535 
2536     return ret;
2537 }
2538 
2539 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2540 {
2541     struct ci_power_info *pi = ci_get_pi(rdev);
2542 
2543     if (pi->need_update_smu7_dpm_table == 0)
2544         return 0;
2545 
2546     return ci_do_program_memory_timing_parameters(rdev);
2547 }
2548 
2549 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2550                       struct radeon_ps *radeon_boot_state)
2551 {
2552     struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2553     struct ci_power_info *pi = ci_get_pi(rdev);
2554     u32 level = 0;
2555 
2556     for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2557         if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2558             boot_state->performance_levels[0].sclk) {
2559             pi->smc_state_table.GraphicsBootLevel = level;
2560             break;
2561         }
2562     }
2563 
2564     for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2565         if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2566             boot_state->performance_levels[0].mclk) {
2567             pi->smc_state_table.MemoryBootLevel = level;
2568             break;
2569         }
2570     }
2571 }
2572 
2573 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2574 {
2575     u32 i;
2576     u32 mask_value = 0;
2577 
2578     for (i = dpm_table->count; i > 0; i--) {
2579         mask_value = mask_value << 1;
2580         if (dpm_table->dpm_levels[i-1].enabled)
2581             mask_value |= 0x1;
2582         else
2583             mask_value &= 0xFFFFFFFE;
2584     }
2585 
2586     return mask_value;
2587 }
2588 
/*
 * Fill the SMC link levels from the PCIe speed DPM table, then cache
 * the level count and the PCIe enable mask in pi.
 */
static void ci_populate_smc_link_level(struct radeon_device *rdev,
                       SMU7_Discrete_DpmTable *table)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct ci_dpm_table *dpm_table = &pi->dpm_table;
    u32 i;

    for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
        table->LinkLevel[i].PcieGenSpeed =
            (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
        /* param1 holds the lane count; encode it for the SMC. */
        table->LinkLevel[i].PcieLaneCount =
            r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
        table->LinkLevel[i].EnabledForActivity = 1;
        /* Down/up thresholds, big-endian as the SMC expects. */
        table->LinkLevel[i].DownT = cpu_to_be32(5);
        table->LinkLevel[i].UpT = cpu_to_be32(30);
    }

    pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
    pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
        ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}
2610 
2611 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2612                      SMU7_Discrete_DpmTable *table)
2613 {
2614     u32 count;
2615     struct atom_clock_dividers dividers;
2616     int ret = -EINVAL;
2617 
2618     table->UvdLevelCount =
2619         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2620 
2621     for (count = 0; count < table->UvdLevelCount; count++) {
2622         table->UvdLevel[count].VclkFrequency =
2623             rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2624         table->UvdLevel[count].DclkFrequency =
2625             rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2626         table->UvdLevel[count].MinVddc =
2627             rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2628         table->UvdLevel[count].MinVddcPhases = 1;
2629 
2630         ret = radeon_atom_get_clock_dividers(rdev,
2631                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2632                              table->UvdLevel[count].VclkFrequency, false, &dividers);
2633         if (ret)
2634             return ret;
2635 
2636         table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2637 
2638         ret = radeon_atom_get_clock_dividers(rdev,
2639                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2640                              table->UvdLevel[count].DclkFrequency, false, &dividers);
2641         if (ret)
2642             return ret;
2643 
2644         table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2645 
2646         table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2647         table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2648         table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2649     }
2650 
2651     return ret;
2652 }
2653 
2654 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2655                      SMU7_Discrete_DpmTable *table)
2656 {
2657     u32 count;
2658     struct atom_clock_dividers dividers;
2659     int ret = -EINVAL;
2660 
2661     table->VceLevelCount =
2662         rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2663 
2664     for (count = 0; count < table->VceLevelCount; count++) {
2665         table->VceLevel[count].Frequency =
2666             rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2667         table->VceLevel[count].MinVoltage =
2668             (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2669         table->VceLevel[count].MinPhases = 1;
2670 
2671         ret = radeon_atom_get_clock_dividers(rdev,
2672                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2673                              table->VceLevel[count].Frequency, false, &dividers);
2674         if (ret)
2675             return ret;
2676 
2677         table->VceLevel[count].Divider = (u8)dividers.post_divider;
2678 
2679         table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2680         table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2681     }
2682 
2683     return ret;
2684 
2685 }
2686 
2687 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2688                      SMU7_Discrete_DpmTable *table)
2689 {
2690     u32 count;
2691     struct atom_clock_dividers dividers;
2692     int ret = -EINVAL;
2693 
2694     table->AcpLevelCount = (u8)
2695         (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2696 
2697     for (count = 0; count < table->AcpLevelCount; count++) {
2698         table->AcpLevel[count].Frequency =
2699             rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2700         table->AcpLevel[count].MinVoltage =
2701             rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2702         table->AcpLevel[count].MinPhases = 1;
2703 
2704         ret = radeon_atom_get_clock_dividers(rdev,
2705                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2706                              table->AcpLevel[count].Frequency, false, &dividers);
2707         if (ret)
2708             return ret;
2709 
2710         table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2711 
2712         table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2713         table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2714     }
2715 
2716     return ret;
2717 }
2718 
2719 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2720                       SMU7_Discrete_DpmTable *table)
2721 {
2722     u32 count;
2723     struct atom_clock_dividers dividers;
2724     int ret = -EINVAL;
2725 
2726     table->SamuLevelCount =
2727         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2728 
2729     for (count = 0; count < table->SamuLevelCount; count++) {
2730         table->SamuLevel[count].Frequency =
2731             rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2732         table->SamuLevel[count].MinVoltage =
2733             rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2734         table->SamuLevel[count].MinPhases = 1;
2735 
2736         ret = radeon_atom_get_clock_dividers(rdev,
2737                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2738                              table->SamuLevel[count].Frequency, false, &dividers);
2739         if (ret)
2740             return ret;
2741 
2742         table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2743 
2744         table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2745         table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2746     }
2747 
2748     return ret;
2749 }
2750 
/*
 * Compute the MPLL register image for @memory_clock into @mclk.
 *
 * Starts from the power-on clock register values cached in
 * pi->clock_registers, asks the vbios for the MPLL dividers, folds in
 * memory spread-spectrum settings when supported, and selects the DLL
 * power-down bits per @dll_state_on.  All values are stored host-order;
 * the caller performs the big-endian conversion for the SMC.
 *
 * Returns 0 on success or the vbios divider-lookup error code.
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
                    u32 memory_clock,
                    SMU7_Discrete_MemoryLevel *mclk,
                    bool strobe_mode,
                    bool dll_state_on)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    /* Working copies of the cached power-on register values. */
    u32  dll_cntl = pi->clock_registers.dll_cntl;
    u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
    u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
    u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
    u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
    u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
    u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
    u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
    u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
    struct atom_mpll_param mpll_param;
    int ret;

    /* Have the vbios compute the MPLL dividers for this memory clock. */
    ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
    if (ret)
        return ret;

    mpll_func_cntl &= ~BWCTRL_MASK;
    mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

    /* Feedback divider (integer + fractional) and VCO mode. */
    mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
    mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
        CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

    mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
    mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

    /* DQ path is only programmed for GDDR5 parts. */
    if (pi->mem_gddr5) {
        mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
        mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
            YCLK_POST_DIV(mpll_param.post_div);
    }

    if (pi->caps_mclk_ss_support) {
        struct radeon_atom_ss ss;
        u32 freq_nom;
        u32 tmp;
        u32 reference_clock = rdev->clock.mpll.reference_freq;

        /* Nominal frequency: memory clock x4 (QDR) or x2, scaled by
         * 2^post_div (post_div is used as an exponent here). */
        if (mpll_param.qdr == 1)
            freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
        else
            freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

        /* tmp = (freq_nom / ref_clk)^2, used to scale the SS step. */
        tmp = (freq_nom / reference_clock);
        tmp = tmp * tmp;
        if (radeon_atombios_get_asic_ss_info(rdev, &ss,
                             ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
            u32 clks = reference_clock * 5 / ss.rate;
            /* NOTE(review): 131 appears to be an empirical scaling
             * constant for the SS step value -- source unconfirmed. */
            u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

            mpll_ss1 &= ~CLKV_MASK;
            mpll_ss1 |= CLKV(clkv);

            mpll_ss2 &= ~CLKS_MASK;
            mpll_ss2 |= CLKS(clks);
        }
    }

    mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
    mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

    /* PDNB bits keep the read DLLs powered when set. */
    if (dll_state_on)
        mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
    else
        mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

    /* Copy the finished register image into the SMC level (host order). */
    mclk->MclkFrequency = memory_clock;
    mclk->MpllFuncCntl = mpll_func_cntl;
    mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
    mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
    mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
    mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
    mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
    mclk->DllCntl = dll_cntl;
    mclk->MpllSs1 = mpll_ss1;
    mclk->MpllSs2 = mpll_ss2;

    return 0;
}
2837 
/*
 * Populate one SMU7 memory DPM level for @memory_clock.
 *
 * Looks up the minimum vddc/vddci/mvdd for the clock, decides the
 * stutter/strobe/EDC/DLL settings, fills in the MPLL register image via
 * ci_calculate_mclk_params(), and finally converts the multi-byte
 * fields to the big-endian layout the SMC expects.
 *
 * Returns 0 on success or a negative error code from the voltage or
 * MPLL-parameter lookups.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
                       u32 memory_clock,
                       SMU7_Discrete_MemoryLevel *memory_level)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    int ret;
    bool dll_state_on;

    /* Minimum voltages for this clock, from the vbios dependency tables
     * (each lookup is skipped when the table is absent). */
    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
        ret = ci_get_dependency_volt_by_clk(rdev,
                            &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                            memory_clock, &memory_level->MinVddc);
        if (ret)
            return ret;
    }

    if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
        ret = ci_get_dependency_volt_by_clk(rdev,
                            &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                            memory_clock, &memory_level->MinVddci);
        if (ret)
            return ret;
    }

    if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
        ret = ci_get_dependency_volt_by_clk(rdev,
                            &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                            memory_clock, &memory_level->MinMvdd);
        if (ret)
            return ret;
    }

    memory_level->MinVddcPhases = 1;

    if (pi->vddc_phase_shed_control)
        ci_populate_phase_value_based_on_mclk(rdev,
                              &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
                              memory_clock,
                              &memory_level->MinVddcPhases);

    /* Activity/hysteresis defaults for this level. */
    memory_level->EnabledForThrottle = 1;
    memory_level->UpH = 0;
    memory_level->DownH = 100;
    memory_level->VoltageDownH = 0;
    memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

    memory_level->StutterEnable = false;
    memory_level->StrobeEnable = false;
    memory_level->EdcReadEnable = false;
    memory_level->EdcWriteEnable = false;
    memory_level->RttEnable = false;

    memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

    /* Stutter mode only below the threshold, with UVD idle, display
     * stutter enabled, and at most two active CRTCs. */
    if (pi->mclk_stutter_mode_threshold &&
        (memory_clock <= pi->mclk_stutter_mode_threshold) &&
        (pi->uvd_enabled == false) &&
        (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
        (rdev->pm.dpm.new_active_crtc_count <= 2))
        memory_level->StutterEnable = true;

    if (pi->mclk_strobe_mode_threshold &&
        (memory_clock <= pi->mclk_strobe_mode_threshold))
        memory_level->StrobeEnable = 1;

    if (pi->mem_gddr5) {
        memory_level->StrobeRatio =
            si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
        if (pi->mclk_edc_enable_threshold &&
            (memory_clock > pi->mclk_edc_enable_threshold))
            memory_level->EdcReadEnable = true;

        if (pi->mclk_edc_wr_enable_threshold &&
            (memory_clock > pi->mclk_edc_wr_enable_threshold))
            memory_level->EdcWriteEnable = true;

        /* Pick the DLL state from the fused MC_SEQ_MISC settings; the
         * register/bit meanings come from the MC sequencer straps. */
        if (memory_level->StrobeEnable) {
            if (si_get_mclk_frequency_ratio(memory_clock, true) >=
                ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
                dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
            else
                dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
        } else {
            dll_state_on = pi->dll_default_on;
        }
    } else {
        /* DDR3 path: strobe ratio from the DDR3 helper. */
        memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
        dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
    }

    ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
    if (ret)
        return ret;

    /* Scale voltages and convert everything to big-endian for the SMC. */
    memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
    memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
    memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
    memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

    memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
    memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
    memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
    memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
    memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
    memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
    memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
    memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
    memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
    memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
    memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

    return 0;
}
2951 
/*
 * Populate the SMC ACPI (lowest-power) graphics and memory levels.
 *
 * The graphics ACPI level runs at the SPLL reference frequency with the
 * SPLL powered down/reset; the memory ACPI level keeps the memory DLLs
 * in reset with their power-down bits cleared.  All multi-byte fields
 * are converted to big-endian for the SMC as they are written.
 *
 * Returns 0 on success or the vbios divider-lookup error code.
 */
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
                      SMU7_Discrete_DpmTable *table)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct atom_clock_dividers dividers;
    SMU7_Discrete_VoltageLevel voltage_level;
    u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
    u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
    u32 dll_cntl = pi->clock_registers.dll_cntl;
    u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
    int ret;

    table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

    /* Use the ACPI vddc when the vbios provides one, otherwise fall
     * back to the lowest vddc in the power-play table. */
    if (pi->acpi_vddc)
        table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
    else
        table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

    table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

    /* ACPI sclk is the bare SPLL reference clock. */
    table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

    ret = radeon_atom_get_clock_dividers(rdev,
                         COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                         table->ACPILevel.SclkFrequency, false, &dividers);
    if (ret)
        return ret;

    table->ACPILevel.SclkDid = (u8)dividers.post_divider;
    table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
    table->ACPILevel.DeepSleepDivId = 0;

    /* Power down and reset the SPLL, and mux sclk away from it. */
    spll_func_cntl &= ~SPLL_PWRON;
    spll_func_cntl |= SPLL_RESET;

    spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
    spll_func_cntl_2 |= SCLK_MUX_SEL(4);

    table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
    table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
    table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
    table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
    table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
    table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
    table->ACPILevel.CcPwrDynRm = 0;
    table->ACPILevel.CcPwrDynRm1 = 0;

    /* Byte-swap the graphics ACPI level in place for the SMC. */
    table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
    table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
    table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
    table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
    table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
    table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
    table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
    table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
    table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
    table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
    table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

    /* Memory ACPI level shares the graphics level's vddc settings
     * (already big-endian at this point). */
    table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
    table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

    if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
        if (pi->acpi_vddci)
            table->MemoryACPILevel.MinVddci =
                cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
        else
            table->MemoryACPILevel.MinVddci =
                cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
    }

    if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
        table->MemoryACPILevel.MinMvdd = 0;
    else
        table->MemoryACPILevel.MinMvdd =
            cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

    /* Hold the memory DLLs in reset with power-down disabled. */
    mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
    mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

    dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

    table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
    table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
    table->MemoryACPILevel.MpllAdFuncCntl =
        cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
    table->MemoryACPILevel.MpllDqFuncCntl =
        cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
    table->MemoryACPILevel.MpllFuncCntl =
        cpu_to_be32(pi->clock_registers.mpll_func_cntl);
    table->MemoryACPILevel.MpllFuncCntl_1 =
        cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
    table->MemoryACPILevel.MpllFuncCntl_2 =
        cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
    table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
    table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

    table->MemoryACPILevel.EnabledForThrottle = 0;
    table->MemoryACPILevel.EnabledForActivity = 0;
    table->MemoryACPILevel.UpH = 0;
    table->MemoryACPILevel.DownH = 100;
    table->MemoryACPILevel.VoltageDownH = 0;
    table->MemoryACPILevel.ActivityLevel =
        cpu_to_be16((u16)pi->mclk_activity_target);

    table->MemoryACPILevel.StutterEnable = false;
    table->MemoryACPILevel.StrobeEnable = false;
    table->MemoryACPILevel.EdcReadEnable = false;
    table->MemoryACPILevel.EdcWriteEnable = false;
    table->MemoryACPILevel.RttEnable = false;

    return 0;
}
3066 
3067 
3068 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3069 {
3070     struct ci_power_info *pi = ci_get_pi(rdev);
3071     struct ci_ulv_parm *ulv = &pi->ulv;
3072 
3073     if (ulv->supported) {
3074         if (enable)
3075             return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3076                 0 : -EINVAL;
3077         else
3078             return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3079                 0 : -EINVAL;
3080     }
3081 
3082     return 0;
3083 }
3084 
3085 static int ci_populate_ulv_level(struct radeon_device *rdev,
3086                  SMU7_Discrete_Ulv *state)
3087 {
3088     struct ci_power_info *pi = ci_get_pi(rdev);
3089     u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3090 
3091     state->CcPwrDynRm = 0;
3092     state->CcPwrDynRm1 = 0;
3093 
3094     if (ulv_voltage == 0) {
3095         pi->ulv.supported = false;
3096         return 0;
3097     }
3098 
3099     if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3100         if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3101             state->VddcOffset = 0;
3102         else
3103             state->VddcOffset =
3104                 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3105     } else {
3106         if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3107             state->VddcOffsetVid = 0;
3108         else
3109             state->VddcOffsetVid = (u8)
3110                 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3111                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3112     }
3113     state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3114 
3115     state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3116     state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3117     state->VddcOffset = cpu_to_be16(state->VddcOffset);
3118 
3119     return 0;
3120 }
3121 
/*
 * Compute the SPLL register image for @engine_clock into @sclk.
 *
 * Asks the vbios for the engine clock dividers, programs the feedback
 * divider with dithering, and folds in engine spread-spectrum settings
 * when supported.  Values are stored host-order; the caller converts
 * them to big-endian for the SMC.
 *
 * Returns 0 on success or the vbios divider-lookup error code.
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
                    u32 engine_clock,
                    SMU7_Discrete_GraphicsLevel *sclk)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct atom_clock_dividers dividers;
    /* Working copies of the cached power-on register values. */
    u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
    u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
    u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
    u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
    u32 reference_clock = rdev->clock.spll.reference_freq;
    u32 reference_divider;
    u32 fbdiv;
    int ret;

    ret = radeon_atom_get_clock_dividers(rdev,
                         COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                         engine_clock, false, &dividers);
    if (ret)
        return ret;

    reference_divider = 1 + dividers.ref_div;
    /* Feedback divider is a 26-bit field. */
    fbdiv = dividers.fb_div & 0x3FFFFFF;

    spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
    spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
    spll_func_cntl_3 |= SPLL_DITHEN;

    if (pi->caps_sclk_ss_support) {
        struct radeon_atom_ss ss;
        u32 vco_freq = engine_clock * dividers.post_div;

        if (radeon_atombios_get_asic_ss_info(rdev, &ss,
                             ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
            /* Derive the SS step (clk_s) and delta (clk_v) register
             * values from the vbios SS rate/percentage. */
            u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
            u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

            cg_spll_spread_spectrum &= ~CLK_S_MASK;
            cg_spll_spread_spectrum |= CLK_S(clk_s);
            cg_spll_spread_spectrum |= SSEN;

            cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
            cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
        }
    }

    /* Store the finished register image (host order). */
    sclk->SclkFrequency = engine_clock;
    sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
    sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
    sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
    sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
    sclk->SclkDid = (u8)dividers.post_divider;

    return 0;
}
3177 
/*
 * Populate one SMU7 graphics DPM level for @engine_clock.
 *
 * Fills the SPLL register image via ci_calculate_sclk_params(), looks
 * up the minimum vddc for the clock, sets activity/hysteresis defaults
 * and the deep-sleep divider, then converts the multi-byte fields to
 * big-endian for the SMC.
 *
 * Returns 0 on success or a negative error code from the SPLL or
 * voltage lookups.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
                        u32 engine_clock,
                        u16 sclk_activity_level_t,
                        SMU7_Discrete_GraphicsLevel *graphic_level)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    int ret;

    ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
    if (ret)
        return ret;

    ret = ci_get_dependency_volt_by_clk(rdev,
                        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                        engine_clock, &graphic_level->MinVddc);
    if (ret)
        return ret;

    graphic_level->SclkFrequency = engine_clock;

    graphic_level->Flags =  0;
    graphic_level->MinVddcPhases = 1;

    if (pi->vddc_phase_shed_control)
        ci_populate_phase_value_based_on_sclk(rdev,
                              &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
                              engine_clock,
                              &graphic_level->MinVddcPhases);

    graphic_level->ActivityLevel = sclk_activity_level_t;

    /* Activity/hysteresis defaults for this level. */
    graphic_level->CcPwrDynRm = 0;
    graphic_level->CcPwrDynRm1 = 0;
    graphic_level->EnabledForThrottle = 1;
    graphic_level->UpH = 0;
    graphic_level->DownH = 0;
    graphic_level->VoltageDownH = 0;
    graphic_level->PowerThrottle = 0;

    /* Deep-sleep divider only when sclk deep sleep is supported. */
    if (pi->caps_sclk_ds)
        graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
                                           engine_clock,
                                           CISLAND_MINIMUM_ENGINE_CLOCK);

    graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

    /* Scale the voltage and convert everything to big-endian for the SMC. */
    graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
    graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
    graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
    graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
    graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
    graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
    graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
    graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
    graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
    graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
    graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

    return 0;
}
3238 
3239 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3240 {
3241     struct ci_power_info *pi = ci_get_pi(rdev);
3242     struct ci_dpm_table *dpm_table = &pi->dpm_table;
3243     u32 level_array_address = pi->dpm_table_start +
3244         offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3245     u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3246         SMU7_MAX_LEVELS_GRAPHICS;
3247     SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3248     u32 i, ret;
3249 
3250     memset(levels, 0, level_array_size);
3251 
3252     for (i = 0; i < dpm_table->sclk_table.count; i++) {
3253         ret = ci_populate_single_graphic_level(rdev,
3254                                dpm_table->sclk_table.dpm_levels[i].value,
3255                                (u16)pi->activity_target[i],
3256                                &pi->smc_state_table.GraphicsLevel[i]);
3257         if (ret)
3258             return ret;
3259         if (i > 1)
3260             pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3261         if (i == (dpm_table->sclk_table.count - 1))
3262             pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3263                 PPSMC_DISPLAY_WATERMARK_HIGH;
3264     }
3265     pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3266 
3267     pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3268     pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3269         ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3270 
3271     ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3272                    (u8 *)levels, level_array_size,
3273                    pi->sram_end);
3274     if (ret)
3275         return ret;
3276 
3277     return 0;
3278 }
3279 
/* Thin wrapper: the SMC ULV state is fully described by one ULV level. */
static int ci_populate_ulv_state(struct radeon_device *rdev,
                 SMU7_Discrete_Ulv *ulv_level)
{
    return ci_populate_ulv_level(rdev, ulv_level);
}
3285 
3286 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3287 {
3288     struct ci_power_info *pi = ci_get_pi(rdev);
3289     struct ci_dpm_table *dpm_table = &pi->dpm_table;
3290     u32 level_array_address = pi->dpm_table_start +
3291         offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3292     u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3293         SMU7_MAX_LEVELS_MEMORY;
3294     SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3295     u32 i, ret;
3296 
3297     memset(levels, 0, level_array_size);
3298 
3299     for (i = 0; i < dpm_table->mclk_table.count; i++) {
3300         if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3301             return -EINVAL;
3302         ret = ci_populate_single_memory_level(rdev,
3303                               dpm_table->mclk_table.dpm_levels[i].value,
3304                               &pi->smc_state_table.MemoryLevel[i]);
3305         if (ret)
3306             return ret;
3307     }
3308 
3309     pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3310 
3311     if ((dpm_table->mclk_table.count >= 2) &&
3312         ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3313         pi->smc_state_table.MemoryLevel[1].MinVddc =
3314             pi->smc_state_table.MemoryLevel[0].MinVddc;
3315         pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3316             pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3317     }
3318 
3319     pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3320 
3321     pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3322     pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3323         ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3324 
3325     pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3326         PPSMC_DISPLAY_WATERMARK_HIGH;
3327 
3328     ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3329                    (u8 *)levels, level_array_size,
3330                    pi->sram_end);
3331     if (ret)
3332         return ret;
3333 
3334     return 0;
3335 }
3336 
3337 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3338                       struct ci_single_dpm_table* dpm_table,
3339                       u32 count)
3340 {
3341     u32 i;
3342 
3343     dpm_table->count = count;
3344     for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3345         dpm_table->dpm_levels[i].enabled = false;
3346 }
3347 
3348 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3349                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3350 {
3351     dpm_table->dpm_levels[index].value = pcie_gen;
3352     dpm_table->dpm_levels[index].param1 = pcie_lanes;
3353     dpm_table->dpm_levels[index].enabled = true;
3354 }
3355 
3356 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3357 {
3358     struct ci_power_info *pi = ci_get_pi(rdev);
3359 
3360     if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3361         return -EINVAL;
3362 
3363     if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3364         pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3365         pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3366     } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3367         pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3368         pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3369     }
3370 
3371     ci_reset_single_dpm_table(rdev,
3372                   &pi->dpm_table.pcie_speed_table,
3373                   SMU7_MAX_LEVELS_LINK);
3374 
3375     if (rdev->family == CHIP_BONAIRE)
3376         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3377                       pi->pcie_gen_powersaving.min,
3378                       pi->pcie_lane_powersaving.max);
3379     else
3380         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3381                       pi->pcie_gen_powersaving.min,
3382                       pi->pcie_lane_powersaving.min);
3383     ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3384                   pi->pcie_gen_performance.min,
3385                   pi->pcie_lane_performance.min);
3386     ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3387                   pi->pcie_gen_powersaving.min,
3388                   pi->pcie_lane_powersaving.max);
3389     ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3390                   pi->pcie_gen_performance.min,
3391                   pi->pcie_lane_performance.max);
3392     ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3393                   pi->pcie_gen_powersaving.max,
3394                   pi->pcie_lane_powersaving.max);
3395     ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3396                   pi->pcie_gen_performance.max,
3397                   pi->pcie_lane_performance.max);
3398 
3399     pi->dpm_table.pcie_speed_table.count = 6;
3400 
3401     return 0;
3402 }
3403 
/**
 * ci_setup_default_dpm_tables - build all driver-side DPM tables from ATOM data
 * @rdev: radeon device
 *
 * Resets and fills the sclk, mclk, vddc, vddci, mvdd and PCIe tables from
 * the clock/voltage dependency tables parsed from the vbios.  Duplicate
 * consecutive clock values are collapsed into a single level.
 *
 * Returns 0 on success, -EINVAL if the required dependency tables are
 * missing or empty.
 */
static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
    struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
    struct radeon_cac_leakage_table *std_voltage_table =
        &rdev->pm.dpm.dyn_state.cac_leakage_table;
    u32 i;

    /* NOTE(review): these pointers are addresses of embedded struct members
     * and can never be NULL; only the count checks are meaningful. */
    if (allowed_sclk_vddc_table == NULL)
        return -EINVAL;
    if (allowed_sclk_vddc_table->count < 1)
        return -EINVAL;
    if (allowed_mclk_table == NULL)
        return -EINVAL;
    if (allowed_mclk_table->count < 1)
        return -EINVAL;

    memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

    ci_reset_single_dpm_table(rdev,
                  &pi->dpm_table.sclk_table,
                  SMU7_MAX_LEVELS_GRAPHICS);
    ci_reset_single_dpm_table(rdev,
                  &pi->dpm_table.mclk_table,
                  SMU7_MAX_LEVELS_MEMORY);
    ci_reset_single_dpm_table(rdev,
                  &pi->dpm_table.vddc_table,
                  SMU7_MAX_LEVELS_VDDC);
    ci_reset_single_dpm_table(rdev,
                  &pi->dpm_table.vddci_table,
                  SMU7_MAX_LEVELS_VDDCI);
    ci_reset_single_dpm_table(rdev,
                  &pi->dpm_table.mvdd_table,
                  SMU7_MAX_LEVELS_MVDD);

    /* Build the sclk table, skipping entries whose clock repeats the
     * previous one; only the first level starts out enabled. */
    pi->dpm_table.sclk_table.count = 0;
    for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
        if ((i == 0) ||
            (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
             allowed_sclk_vddc_table->entries[i].clk)) {
            pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
                allowed_sclk_vddc_table->entries[i].clk;
            pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
                (i == 0) ? true : false;
            pi->dpm_table.sclk_table.count++;
        }
    }

    /* Same dedup scheme for the mclk table. */
    pi->dpm_table.mclk_table.count = 0;
    for (i = 0; i < allowed_mclk_table->count; i++) {
        if ((i == 0) ||
            (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
             allowed_mclk_table->entries[i].clk)) {
            pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
                allowed_mclk_table->entries[i].clk;
            pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
                (i == 0) ? true : false;
            pi->dpm_table.mclk_table.count++;
        }
    }

    /* NOTE(review): indexes the CAC leakage table in lockstep with the
     * sclk/vddc table — assumes it has at least as many entries; confirm. */
    for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
        pi->dpm_table.vddc_table.dpm_levels[i].value =
            allowed_sclk_vddc_table->entries[i].v;
        pi->dpm_table.vddc_table.dpm_levels[i].param1 =
            std_voltage_table->entries[i].leakage;
        pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
    }
    pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

    /* NOTE(review): address-of member, so this check is always true. */
    allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
    if (allowed_mclk_table) {
        for (i = 0; i < allowed_mclk_table->count; i++) {
            pi->dpm_table.vddci_table.dpm_levels[i].value =
                allowed_mclk_table->entries[i].v;
            pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
        }
        pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
    }

    /* NOTE(review): same always-true check as above. */
    allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
    if (allowed_mclk_table) {
        for (i = 0; i < allowed_mclk_table->count; i++) {
            pi->dpm_table.mvdd_table.dpm_levels[i].value =
                allowed_mclk_table->entries[i].v;
            pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
        }
        pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
    }

    /* NOTE(review): return value ignored; -EINVAL from the PCIe setup is
     * silently dropped — confirm intentional. */
    ci_setup_default_pcie_tables(rdev);

    return 0;
}
3501 
3502 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3503                   u32 value, u32 *boot_level)
3504 {
3505     u32 i;
3506     int ret = -EINVAL;
3507 
3508     for(i = 0; i < table->count; i++) {
3509         if (value == table->dpm_levels[i].value) {
3510             *boot_level = i;
3511             ret = 0;
3512         }
3513     }
3514 
3515     return ret;
3516 }
3517 
/**
 * ci_init_smc_table - build the complete discrete DPM table and upload it
 * @rdev: radeon device
 *
 * Constructs the SMU7_Discrete_DpmTable from the driver-side DPM tables
 * (graphics, memory, link, ACPI, VCE/ACP/SAMU/UVD levels), fills in boot
 * state and policy scalars, byte-swaps the multi-byte fields to the SMC's
 * big-endian layout, and copies the table into SMC RAM.
 *
 * Returns 0 on success or the first populate/upload error code.
 */
static int ci_init_smc_table(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct ci_ulv_parm *ulv = &pi->ulv;
    struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
    SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
    int ret;

    ret = ci_setup_default_dpm_tables(rdev);
    if (ret)
        return ret;

    if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
        ci_populate_smc_voltage_tables(rdev, table);

    ci_init_fps_limits(rdev);

    /* Platform capability flags for the SMC. */
    if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
        table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

    if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
        table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

    if (pi->mem_gddr5)
        table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

    if (ulv->supported) {
        ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
        if (ret)
            return ret;
        WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
    }

    ret = ci_populate_all_graphic_levels(rdev);
    if (ret)
        return ret;

    ret = ci_populate_all_memory_levels(rdev);
    if (ret)
        return ret;

    ci_populate_smc_link_level(rdev, table);

    ret = ci_populate_smc_acpi_level(rdev, table);
    if (ret)
        return ret;

    ret = ci_populate_smc_vce_level(rdev, table);
    if (ret)
        return ret;

    ret = ci_populate_smc_acp_level(rdev, table);
    if (ret)
        return ret;

    ret = ci_populate_smc_samu_level(rdev, table);
    if (ret)
        return ret;

    ret = ci_do_program_memory_timing_parameters(rdev);
    if (ret)
        return ret;

    ret = ci_populate_smc_uvd_level(rdev, table);
    if (ret)
        return ret;

    table->UvdBootLevel  = 0;
    table->VceBootLevel  = 0;
    table->AcpBootLevel  = 0;
    table->SamuBootLevel  = 0;
    table->GraphicsBootLevel  = 0;
    table->MemoryBootLevel  = 0;

    /* NOTE(review): the return values of both ci_find_boot_level() calls
     * are overwritten/ignored; on lookup failure the boot levels simply
     * stay 0 — confirm this fallback is intentional.  Also confirm the
     * (u32 *) casts match the actual width of the BootLevel fields in
     * SMU7_Discrete_DpmTable. */
    ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
                 pi->vbios_boot_state.sclk_bootup_value,
                 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

    ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
                 pi->vbios_boot_state.mclk_bootup_value,
                 (u32 *)&pi->smc_state_table.MemoryBootLevel);

    table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
    table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
    table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

    ci_populate_smc_initial_state(rdev, radeon_boot_state);

    ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
    if (ret)
        return ret;

    /* Policy scalars: per-block sampling intervals and thermal limits. */
    table->UVDInterval = 1;
    table->VCEInterval = 1;
    table->ACPInterval = 1;
    table->SAMUInterval = 1;
    table->GraphicsVoltageChangeEnable = 1;
    table->GraphicsThermThrottleEnable = 1;
    table->GraphicsInterval = 1;
    table->VoltageInterval = 1;
    table->ThermalInterval = 1;
    /* Temperature limits are converted to the SMC's Q8.8 fixed point. */
    table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
                         CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
    table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
                        CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
    table->MemoryVoltageChangeEnable = 1;
    table->MemoryInterval = 1;
    table->VoltageResponseTime = 0;
    table->VddcVddciDelta = 4000;
    table->PhaseResponseTime = 0;
    table->MemoryThermThrottleEnable = 1;
    table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
    table->PCIeGenInterval = 1;
    if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
        table->SVI2Enable  = 1;
    else
        table->SVI2Enable  = 0;

    table->ThermGpio = 17;
    table->SclkStepSize = 0x4000;

    /* Byte-swap every multi-byte field: the SMC consumes big-endian data. */
    table->SystemFlags = cpu_to_be32(table->SystemFlags);
    table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
    table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
    table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
    table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
    table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
    table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
    table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
    table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
    table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
    table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
    table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
    table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
    table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

    /* Upload everything from SystemFlags onward, excluding the trailing
     * PID controller structs. */
    ret = ci_copy_bytes_to_smc(rdev,
                   pi->dpm_table_start +
                   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
                   (u8 *)&table->SystemFlags,
                   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
                   pi->sram_end);
    if (ret)
        return ret;

    return 0;
}
3665 
3666 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3667                       struct ci_single_dpm_table *dpm_table,
3668                       u32 low_limit, u32 high_limit)
3669 {
3670     u32 i;
3671 
3672     for (i = 0; i < dpm_table->count; i++) {
3673         if ((dpm_table->dpm_levels[i].value < low_limit) ||
3674             (dpm_table->dpm_levels[i].value > high_limit))
3675             dpm_table->dpm_levels[i].enabled = false;
3676         else
3677             dpm_table->dpm_levels[i].enabled = true;
3678     }
3679 }
3680 
3681 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3682                     u32 speed_low, u32 lanes_low,
3683                     u32 speed_high, u32 lanes_high)
3684 {
3685     struct ci_power_info *pi = ci_get_pi(rdev);
3686     struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3687     u32 i, j;
3688 
3689     for (i = 0; i < pcie_table->count; i++) {
3690         if ((pcie_table->dpm_levels[i].value < speed_low) ||
3691             (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3692             (pcie_table->dpm_levels[i].value > speed_high) ||
3693             (pcie_table->dpm_levels[i].param1 > lanes_high))
3694             pcie_table->dpm_levels[i].enabled = false;
3695         else
3696             pcie_table->dpm_levels[i].enabled = true;
3697     }
3698 
3699     for (i = 0; i < pcie_table->count; i++) {
3700         if (pcie_table->dpm_levels[i].enabled) {
3701             for (j = i + 1; j < pcie_table->count; j++) {
3702                 if (pcie_table->dpm_levels[j].enabled) {
3703                     if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3704                         (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3705                         pcie_table->dpm_levels[j].enabled = false;
3706                 }
3707             }
3708         }
3709     }
3710 }
3711 
3712 static int ci_trim_dpm_states(struct radeon_device *rdev,
3713                   struct radeon_ps *radeon_state)
3714 {
3715     struct ci_ps *state = ci_get_ps(radeon_state);
3716     struct ci_power_info *pi = ci_get_pi(rdev);
3717     u32 high_limit_count;
3718 
3719     if (state->performance_level_count < 1)
3720         return -EINVAL;
3721 
3722     if (state->performance_level_count == 1)
3723         high_limit_count = 0;
3724     else
3725         high_limit_count = 1;
3726 
3727     ci_trim_single_dpm_states(rdev,
3728                   &pi->dpm_table.sclk_table,
3729                   state->performance_levels[0].sclk,
3730                   state->performance_levels[high_limit_count].sclk);
3731 
3732     ci_trim_single_dpm_states(rdev,
3733                   &pi->dpm_table.mclk_table,
3734                   state->performance_levels[0].mclk,
3735                   state->performance_levels[high_limit_count].mclk);
3736 
3737     ci_trim_pcie_dpm_states(rdev,
3738                 state->performance_levels[0].pcie_gen,
3739                 state->performance_levels[0].pcie_lane,
3740                 state->performance_levels[high_limit_count].pcie_gen,
3741                 state->performance_levels[high_limit_count].pcie_lane);
3742 
3743     return 0;
3744 }
3745 
3746 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3747 {
3748     struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3749         &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3750     struct radeon_clock_voltage_dependency_table *vddc_table =
3751         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3752     u32 requested_voltage = 0;
3753     u32 i;
3754 
3755     if (disp_voltage_table == NULL)
3756         return -EINVAL;
3757     if (!disp_voltage_table->count)
3758         return -EINVAL;
3759 
3760     for (i = 0; i < disp_voltage_table->count; i++) {
3761         if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3762             requested_voltage = disp_voltage_table->entries[i].v;
3763     }
3764 
3765     for (i = 0; i < vddc_table->count; i++) {
3766         if (requested_voltage <= vddc_table->entries[i].v) {
3767             requested_voltage = vddc_table->entries[i].v;
3768             return (ci_send_msg_to_smc_with_parameter(rdev,
3769                                   PPSMC_MSG_VddC_Request,
3770                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3771                 0 : -EINVAL;
3772         }
3773     }
3774 
3775     return -EINVAL;
3776 }
3777 
/**
 * ci_upload_dpm_level_enable_mask - push the sclk/mclk enable masks to the SMC
 * @rdev: radeon device
 *
 * Sends the current SCLK and MCLK DPM level enable masks to the SMC, unless
 * the corresponding DPM key is disabled or the mask is empty.  The PCIe
 * mask upload is compiled out.
 *
 * Returns 0 on success, -EINVAL if the SMC rejects a mask.
 */
static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    PPSMC_Result result;

    /* NOTE(review): return value ignored — the display voltage request is
     * treated as best effort; confirm intentional. */
    ci_apply_disp_minimum_voltage_request(rdev);

    if (!pi->sclk_dpm_key_disabled) {
        if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
            result = ci_send_msg_to_smc_with_parameter(rdev,
                                   PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
            if (result != PPSMC_Result_OK)
                return -EINVAL;
        }
    }

    if (!pi->mclk_dpm_key_disabled) {
        if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
            result = ci_send_msg_to_smc_with_parameter(rdev,
                                   PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
            if (result != PPSMC_Result_OK)
                return -EINVAL;
        }
    }
#if 0
    if (!pi->pcie_dpm_key_disabled) {
        if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
            result = ci_send_msg_to_smc_with_parameter(rdev,
                                   PPSMC_MSG_PCIeDPM_SetEnabledMask,
                                   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
            if (result != PPSMC_Result_OK)
                return -EINVAL;
        }
    }
#endif
    return 0;
}
3817 
/**
 * ci_find_dpm_states_clocks_in_dpm_table - decide which SMC tables need reupload
 * @rdev: radeon device
 * @radeon_state: requested power state
 *
 * Checks whether the top performance level's sclk/mclk already exist in the
 * driver DPM tables.  Missing clocks mark the table for an overdrive update
 * (DPMTABLE_OD_UPDATE_*); a change in active crtc count forces an MCLK
 * update.  The result is accumulated in pi->need_update_smu7_dpm_table.
 */
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
                           struct radeon_ps *radeon_state)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct ci_ps *state = ci_get_ps(radeon_state);
    struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
    u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
    struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
    u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
    u32 i;

    pi->need_update_smu7_dpm_table = 0;

    for (i = 0; i < sclk_table->count; i++) {
        if (sclk == sclk_table->dpm_levels[i].value)
            break;
    }

    if (i >= sclk_table->count) {
        pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
    } else {
        /* XXX The current code always reprogrammed the sclk levels,
         * but we don't currently handle disp sclk requirements
         * so just skip it.
         */
        /* NOTE(review): the self-comparison below is always false by
         * construction — it deliberately disables the DPMTABLE_UPDATE_SCLK
         * path described in the XXX note above. */
        if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
            pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
    }

    for (i = 0; i < mclk_table->count; i++) {
        if (mclk == mclk_table->dpm_levels[i].value)
            break;
    }

    if (i >= mclk_table->count)
        pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

    /* A change in the number of active displays requires new memory
     * watermarks, hence an MCLK table update. */
    if (rdev->pm.dpm.current_active_crtc_count !=
        rdev->pm.dpm.new_active_crtc_count)
        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
3859 
3860 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3861                                struct radeon_ps *radeon_state)
3862 {
3863     struct ci_power_info *pi = ci_get_pi(rdev);
3864     struct ci_ps *state = ci_get_ps(radeon_state);
3865     u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3866     u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3867     struct ci_dpm_table *dpm_table = &pi->dpm_table;
3868     int ret;
3869 
3870     if (!pi->need_update_smu7_dpm_table)
3871         return 0;
3872 
3873     if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3874         dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3875 
3876     if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3877         dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3878 
3879     if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3880         ret = ci_populate_all_graphic_levels(rdev);
3881         if (ret)
3882             return ret;
3883     }
3884 
3885     if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3886         ret = ci_populate_all_memory_levels(rdev);
3887         if (ret)
3888             return ret;
3889     }
3890 
3891     return 0;
3892 }
3893 
/**
 * ci_enable_uvd_dpm - enable or disable UVD DPM in the SMC
 * @rdev: radeon device
 * @enable: true to enable UVD DPM, false to disable
 *
 * When enabling, builds the UVD level enable mask from the voltage
 * dependency table (capped by the AC/DC voltage limit), uploads it, and
 * masks off MCLK level 0 while UVD runs.  When disabling, restores MCLK
 * level 0.  Finally sends the UVDDPM enable/disable message.
 *
 * Returns 0 if the final SMC message succeeds, -EINVAL otherwise.
 */
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    const struct radeon_clock_and_voltage_limits *max_limits;
    int i;

    if (rdev->pm.dpm.ac_power)
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
    else
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

    if (enable) {
        pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

        /* Walk top-down; without full UVD DPM support only the highest
         * level that fits the voltage limit is enabled. */
        for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
            if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

                if (!pi->caps_uvd_dpm)
                    break;
            }
        }

        ci_send_msg_to_smc_with_parameter(rdev,
                          PPSMC_MSG_UVDDPM_SetEnabledMask,
                          pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

        /* Clear MCLK level 0 while UVD is active. */
        if (pi->last_mclk_dpm_enable_mask & 0x1) {
            pi->uvd_enabled = true;
            pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
            ci_send_msg_to_smc_with_parameter(rdev,
                              PPSMC_MSG_MCLKDPM_SetEnabledMask,
                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
        }
    } else {
        /* UVD going idle: re-enable MCLK level 0 if it was originally on. */
        if (pi->last_mclk_dpm_enable_mask & 0x1) {
            pi->uvd_enabled = false;
            pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
            ci_send_msg_to_smc_with_parameter(rdev,
                              PPSMC_MSG_MCLKDPM_SetEnabledMask,
                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
        }
    }

    return (ci_send_msg_to_smc(rdev, enable ?
                   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
        0 : -EINVAL;
}
3942 
3943 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3944 {
3945     struct ci_power_info *pi = ci_get_pi(rdev);
3946     const struct radeon_clock_and_voltage_limits *max_limits;
3947     int i;
3948 
3949     if (rdev->pm.dpm.ac_power)
3950         max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3951     else
3952         max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3953 
3954     if (enable) {
3955         pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3956         for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3957             if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3958                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3959 
3960                 if (!pi->caps_vce_dpm)
3961                     break;
3962             }
3963         }
3964 
3965         ci_send_msg_to_smc_with_parameter(rdev,
3966                           PPSMC_MSG_VCEDPM_SetEnabledMask,
3967                           pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3968     }
3969 
3970     return (ci_send_msg_to_smc(rdev, enable ?
3971                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3972         0 : -EINVAL;
3973 }
3974 
#if 0
/* SAMU/ACP DPM enable helpers — currently compiled out; kept for reference.
 * They mirror ci_enable_uvd_dpm()/ci_enable_vce_dpm(): build a level enable
 * mask capped by the AC/DC voltage limit, upload it, then send the
 * enable/disable message.
 */
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    const struct radeon_clock_and_voltage_limits *max_limits;
    int i;

    if (rdev->pm.dpm.ac_power)
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
    else
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

    if (enable) {
        pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
        for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
            if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

                if (!pi->caps_samu_dpm)
                    break;
            }
        }

        ci_send_msg_to_smc_with_parameter(rdev,
                          PPSMC_MSG_SAMUDPM_SetEnabledMask,
                          pi->dpm_level_enable_mask.samu_dpm_enable_mask);
    }
    return (ci_send_msg_to_smc(rdev, enable ?
                   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
        0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    const struct radeon_clock_and_voltage_limits *max_limits;
    int i;

    if (rdev->pm.dpm.ac_power)
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
    else
        max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

    if (enable) {
        pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
        for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
            if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

                if (!pi->caps_acp_dpm)
                    break;
            }
        }

        ci_send_msg_to_smc_with_parameter(rdev,
                          PPSMC_MSG_ACPDPM_SetEnabledMask,
                          pi->dpm_level_enable_mask.acp_dpm_enable_mask);
    }

    return (ci_send_msg_to_smc(rdev, enable ?
                   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
        0 : -EINVAL;
}
#endif
4039 
/**
 * ci_update_uvd_dpm - handle UVD power gating state changes
 * @rdev: radeon device
 * @gate: true if UVD is being gated (powered down)
 *
 * When ungating, picks a UVD boot level (0 with full UVD DPM support,
 * otherwise the highest table entry) and programs it into the SMC's
 * DPM_TABLE_475 register via read-modify-write, then toggles UVD DPM.
 *
 * Returns the result of ci_enable_uvd_dpm().
 */
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u32 tmp;

    if (!gate) {
        if (pi->caps_uvd_dpm ||
            (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
            pi->smc_state_table.UvdBootLevel = 0;
        else
            pi->smc_state_table.UvdBootLevel =
                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

        /* RMW the UvdBootLevel field of the SMC-visible DPM table. */
        tmp = RREG32_SMC(DPM_TABLE_475);
        tmp &= ~UvdBootLevel_MASK;
        tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
        WREG32_SMC(DPM_TABLE_475, tmp);
    }

    return ci_enable_uvd_dpm(rdev, !gate);
}
4061 
4062 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4063 {
4064     u8 i;
4065     u32 min_evclk = 30000; /* ??? */
4066     struct radeon_vce_clock_voltage_dependency_table *table =
4067         &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4068 
4069     for (i = 0; i < table->count; i++) {
4070         if (table->entries[i].evclk >= min_evclk)
4071             return i;
4072     }
4073 
4074     return table->count - 1;
4075 }
4076 
/**
 * ci_update_vce_dpm - react to a change in the requested VCE clock
 * @rdev: radeon device
 * @radeon_new_state: power state being switched to
 * @radeon_current_state: power state being switched from
 *
 * Only acts when evclk changes.  A non-zero new evclk means encoding is
 * starting: VCE clock gating is released, the boot level reprogrammed in
 * DPM_TABLE_475, and VCE DPM enabled.  A zero evclk re-gates the clocks
 * and disables VCE DPM.
 *
 * Returns 0 or the error from ci_enable_vce_dpm().
 */
static int ci_update_vce_dpm(struct radeon_device *rdev,
                 struct radeon_ps *radeon_new_state,
                 struct radeon_ps *radeon_current_state)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    int ret = 0;
    u32 tmp;

    if (radeon_current_state->evclk != radeon_new_state->evclk) {
        if (radeon_new_state->evclk) {
            /* turn the clocks on when encoding */
            cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

            /* RMW the VceBootLevel field of the SMC-visible DPM table. */
            pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
            tmp = RREG32_SMC(DPM_TABLE_475);
            tmp &= ~VceBootLevel_MASK;
            tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
            WREG32_SMC(DPM_TABLE_475, tmp);

            ret = ci_enable_vce_dpm(rdev, true);
        } else {
            /* turn the clocks off when not encoding */
            cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

            ret = ci_enable_vce_dpm(rdev, false);
        }
    }
    return ret;
}
4106 
#if 0
/* SAMU/ACP gating hooks — compiled out along with their enable helpers
 * above; kept for reference.
 */
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
    return ci_enable_samu_dpm(rdev, gate);
}

static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u32 tmp;

    if (!gate) {
        pi->smc_state_table.AcpBootLevel = 0;

        /* RMW the AcpBootLevel field of the SMC-visible DPM table. */
        tmp = RREG32_SMC(DPM_TABLE_475);
        tmp &= ~AcpBootLevel_MASK;
        tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
        WREG32_SMC(DPM_TABLE_475, tmp);
    }

    return ci_enable_acp_dpm(rdev, !gate);
}
#endif
4130 
/*
 * Trim the DPM tables to the given state and derive the sclk/mclk/pcie
 * level enable masks.  When UVD is active, the lowest MCLK level
 * (bit 0) is removed from the mask.  The pre-UVD mclk mask is cached in
 * pi->last_mclk_dpm_enable_mask.
 *
 * Returns 0 on success or the error from ci_trim_dpm_states().
 */
static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
					     struct radeon_ps *radeon_state)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    int ret;

    ret = ci_trim_dpm_states(rdev, radeon_state);
    if (ret)
        return ret;

    pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
        ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
    pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
        ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
    pi->last_mclk_dpm_enable_mask =
        pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
    if (pi->uvd_enabled) {
        /* drop the lowest mclk level while UVD is running */
        if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
            pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
    }
    pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
        ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

    return 0;
}
4156 
4157 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4158                        u32 level_mask)
4159 {
4160     u32 level = 0;
4161 
4162     while ((level_mask & (1 << level)) == 0)
4163         level++;
4164 
4165     return level;
4166 }
4167 
4168 
/*
 * Force DPM to a fixed performance level (HIGH/LOW) or return it to
 * automatic selection (AUTO).
 *
 * HIGH: each enabled table (pcie/sclk/mclk) is forced to its highest
 * enabled level and the SMC's current-index register is polled until
 * the switch takes effect or rdev->usec_timeout expires.
 * LOW: the lowest enabled level of each table is forced instead.
 * AUTO: the SMC is told to unforce the PCIe level and the full level
 * enable masks are re-uploaded.
 *
 * Returns 0 on success or a negative error code.
 */
int ci_dpm_force_performance_level(struct radeon_device *rdev,
                   enum radeon_dpm_forced_level level)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u32 tmp, levels, i;
    int ret;

    if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
        if ((!pi->pcie_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
            levels = 0;
            tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
            /* index of the highest set bit == highest enabled level */
            while (tmp >>= 1)
                levels++;
            if (levels) {
                /* NOTE(review): the sclk/mclk calls below pass
                 * 'levels', but this passes the 'level' enum -
                 * confirm whether this is intentional.
                 */
                ret = ci_dpm_force_state_pcie(rdev, level);
                if (ret)
                    return ret;
                /* wait for the SMC to report the forced pcie index */
                for (i = 0; i < rdev->usec_timeout; i++) {
                    tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
                           CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
                    if (tmp == levels)
                        break;
                    udelay(1);
                }
            }
        }
        if ((!pi->sclk_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
            levels = 0;
            tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
            /* index of the highest set bit == highest enabled level */
            while (tmp >>= 1)
                levels++;
            if (levels) {
                ret = ci_dpm_force_state_sclk(rdev, levels);
                if (ret)
                    return ret;
                /* wait for the SMC to report the forced sclk index */
                for (i = 0; i < rdev->usec_timeout; i++) {
                    tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
                           CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
                    if (tmp == levels)
                        break;
                    udelay(1);
                }
            }
        }
        if ((!pi->mclk_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
            levels = 0;
            tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
            /* index of the highest set bit == highest enabled level */
            while (tmp >>= 1)
                levels++;
            if (levels) {
                ret = ci_dpm_force_state_mclk(rdev, levels);
                if (ret)
                    return ret;
                /* wait for the SMC to report the forced mclk index */
                for (i = 0; i < rdev->usec_timeout; i++) {
                    tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
                           CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
                    if (tmp == levels)
                        break;
                    udelay(1);
                }
            }
        }
    } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
        if ((!pi->sclk_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
            levels = ci_get_lowest_enabled_level(rdev,
                                 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
            ret = ci_dpm_force_state_sclk(rdev, levels);
            if (ret)
                return ret;
            for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
                       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
                if (tmp == levels)
                    break;
                udelay(1);
            }
        }
        if ((!pi->mclk_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
            levels = ci_get_lowest_enabled_level(rdev,
                                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
            ret = ci_dpm_force_state_mclk(rdev, levels);
            if (ret)
                return ret;
            for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
                       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
                if (tmp == levels)
                    break;
                udelay(1);
            }
        }
        if ((!pi->pcie_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
            levels = ci_get_lowest_enabled_level(rdev,
                                 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
            ret = ci_dpm_force_state_pcie(rdev, levels);
            if (ret)
                return ret;
            for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
                       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
                if (tmp == levels)
                    break;
                udelay(1);
            }
        }
    } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
        if (!pi->pcie_dpm_key_disabled) {
            PPSMC_Result smc_result;

            /* let the SMC pick the pcie level again */
            smc_result = ci_send_msg_to_smc(rdev,
                            PPSMC_MSG_PCIeDPM_UnForceLevel);
            if (smc_result != PPSMC_Result_OK)
                return -EINVAL;
        }
        ret = ci_upload_dpm_level_enable_mask(rdev);
        if (ret)
            return ret;
    }

    rdev->pm.dpm.forced_level = level;

    return 0;
}
4298 
/*
 * Append derived "special" MC register columns to the register table.
 *
 * For base sequencer registers that need companion command registers
 * (MC_SEQ_MISC1 -> EMRS/MRS[/AUTO_CMD for non-GDDR5], MC_SEQ_RESERVE_M
 * -> MRS1), new columns are appended at index j whose per-entry values
 * combine the current hardware value with the base column i.
 * table->last is grown to cover the appended columns.
 *
 * Returns 0 on success, -EINVAL if the table would overflow
 * SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE.
 */
static int ci_set_mc_special_registers(struct radeon_device *rdev,
                       struct ci_mc_reg_table *table)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    u8 i, j, k;
    u32 temp_reg;

    for (i = 0, j = table->last; i < table->last; i++) {
        if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
            return -EINVAL;
        /* s1 appears to hold a dword offset; shift back to a byte
         * address for comparison with the register defines - TODO
         * confirm against ci_register_patching_mc_seq(), which uses
         * the opposite shift. */
        switch(table->mc_reg_address[i].s1 << 2) {
        case MC_SEQ_MISC1:
            /* EMRS column: high half from the live register, low half
             * from the high half of the base column. */
            temp_reg = RREG32(MC_PMG_CMD_EMRS);
            table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
            table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
            for (k = 0; k < table->num_entries; k++) {
                table->mc_reg_table_entry[k].mc_data[j] =
                    ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
            }
            j++;
            if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                return -EINVAL;

            /* MRS column: high half from the live register, low half
             * from the base column; non-GDDR5 also sets bit 8. */
            temp_reg = RREG32(MC_PMG_CMD_MRS);
            table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
            table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
            for (k = 0; k < table->num_entries; k++) {
                table->mc_reg_table_entry[k].mc_data[j] =
                    (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
                if (!pi->mem_gddr5)
                    table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
            }
            j++;
            if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                return -EINVAL;

            if (!pi->mem_gddr5) {
                table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
                table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
                for (k = 0; k < table->num_entries; k++) {
                    table->mc_reg_table_entry[k].mc_data[j] =
                        (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
                }
                j++;
                /* NOTE(review): this and the MRS1 check below use '>'
                 * where the others use '>='.  j == ARRAY_SIZE slips
                 * through here but is caught by the top-of-loop check
                 * before any further write - confirm intentional. */
                if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                    return -EINVAL;
            }
            break;
        case MC_SEQ_RESERVE_M:
            /* MRS1 column, built like the MRS column above */
            temp_reg = RREG32(MC_PMG_CMD_MRS1);
            table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
            table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
            for (k = 0; k < table->num_entries; k++) {
                table->mc_reg_table_entry[k].mc_data[j] =
                    (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
            }
            j++;
            if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                return -EINVAL;
            break;
        default:
            break;
        }

    }

    table->last = j;

    return 0;
}
4369 
4370 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4371 {
4372     bool result = true;
4373 
4374     switch(in_reg) {
4375     case MC_SEQ_RAS_TIMING >> 2:
4376         *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4377         break;
4378     case MC_SEQ_DLL_STBY >> 2:
4379         *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4380         break;
4381     case MC_SEQ_G5PDX_CMD0 >> 2:
4382         *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4383         break;
4384     case MC_SEQ_G5PDX_CMD1 >> 2:
4385         *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4386         break;
4387     case MC_SEQ_G5PDX_CTRL >> 2:
4388         *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4389         break;
4390     case MC_SEQ_CAS_TIMING >> 2:
4391         *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4392         break;
4393     case MC_SEQ_MISC_TIMING >> 2:
4394         *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4395         break;
4396     case MC_SEQ_MISC_TIMING2 >> 2:
4397         *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4398         break;
4399     case MC_SEQ_PMG_DVS_CMD >> 2:
4400         *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4401         break;
4402     case MC_SEQ_PMG_DVS_CTL >> 2:
4403         *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4404         break;
4405     case MC_SEQ_RD_CTL_D0 >> 2:
4406         *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4407         break;
4408     case MC_SEQ_RD_CTL_D1 >> 2:
4409         *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4410         break;
4411     case MC_SEQ_WR_CTL_D0 >> 2:
4412         *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4413         break;
4414     case MC_SEQ_WR_CTL_D1 >> 2:
4415         *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4416         break;
4417     case MC_PMG_CMD_EMRS >> 2:
4418         *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4419         break;
4420     case MC_PMG_CMD_MRS >> 2:
4421         *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4422         break;
4423     case MC_PMG_CMD_MRS1 >> 2:
4424         *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4425         break;
4426     case MC_SEQ_PMG_TIMING >> 2:
4427         *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4428         break;
4429     case MC_PMG_CMD_MRS2 >> 2:
4430         *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4431         break;
4432     case MC_SEQ_WR_CTL_2 >> 2:
4433         *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4434         break;
4435     default:
4436         result = false;
4437         break;
4438     }
4439 
4440     return result;
4441 }
4442 
4443 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4444 {
4445     u8 i, j;
4446 
4447     for (i = 0; i < table->last; i++) {
4448         for (j = 1; j < table->num_entries; j++) {
4449             if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4450                 table->mc_reg_table_entry[j].mc_data[i]) {
4451                 table->valid_flag |= 1 << i;
4452                 break;
4453             }
4454         }
4455     }
4456 }
4457 
4458 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4459 {
4460     u32 i;
4461     u16 address;
4462 
4463     for (i = 0; i < table->last; i++) {
4464         table->mc_reg_address[i].s0 =
4465             ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4466             address : table->mc_reg_address[i].s1;
4467     }
4468 }
4469 
4470 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4471                       struct ci_mc_reg_table *ci_table)
4472 {
4473     u8 i, j;
4474 
4475     if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4476         return -EINVAL;
4477     if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4478         return -EINVAL;
4479 
4480     for (i = 0; i < table->last; i++)
4481         ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4482 
4483     ci_table->last = table->last;
4484 
4485     for (i = 0; i < table->num_entries; i++) {
4486         ci_table->mc_reg_table_entry[i].mclk_max =
4487             table->mc_reg_table_entry[i].mclk_max;
4488         for (j = 0; j < table->last; j++)
4489             ci_table->mc_reg_table_entry[i].mc_data[j] =
4490                 table->mc_reg_table_entry[i].mc_data[j];
4491     }
4492     ci_table->num_entries = table->num_entries;
4493 
4494     return 0;
4495 }
4496 
/*
 * Apply hard-coded MC sequencer fixups for AC-timing entries with
 * mclk_max of 125000 / 137500 on specific boards (PCI device IDs
 * 0x67B0/0x67B1), gated on (MC_SEQ_MISC0 & 0xf00) == 0x300 (some
 * memory configuration indicator - exact meaning not visible here),
 * then tweaks MC_SEQ_IO_DEBUG word 3.
 *
 * NOTE(review): the switch compares s1 >> 2 against the register
 * defines, while ci_set_mc_special_registers() recovers the byte
 * address with s1 << 2.  If s1 holds a dword offset these cases may
 * never match - confirm which shift is intended.
 */
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
                       struct ci_mc_reg_table *table)
{
    u8 i, k;
    u32 tmp;
    bool patch;

    tmp = RREG32(MC_SEQ_MISC0);
    patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

    if (patch &&
        ((rdev->pdev->device == 0x67B0) ||
         (rdev->pdev->device == 0x67B1))) {
        for (i = 0; i < table->last; i++) {
            /* NOTE(review): loop-invariant check - presumably meant
             * to validate once before the loop */
            if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                return -EINVAL;
            switch(table->mc_reg_address[i].s1 >> 2) {
            case MC_SEQ_MISC1:
                for (k = 0; k < table->num_entries; k++) {
                    if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
                        (table->mc_reg_table_entry[k].mclk_max == 137500))
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
                            0x00000007;
                }
                break;
            case MC_SEQ_WR_CTL_D0:
                for (k = 0; k < table->num_entries; k++) {
                    if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
                        (table->mc_reg_table_entry[k].mclk_max == 137500))
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
                            0x0000D0DD;
                }
                break;
            case MC_SEQ_WR_CTL_D1:
                for (k = 0; k < table->num_entries; k++) {
                    if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
                        (table->mc_reg_table_entry[k].mclk_max == 137500))
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
                            0x0000D0DD;
                }
                break;
            case MC_SEQ_WR_CTL_2:
                for (k = 0; k < table->num_entries; k++) {
                    if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
                        (table->mc_reg_table_entry[k].mclk_max == 137500))
                        table->mc_reg_table_entry[k].mc_data[i] = 0;
                }
                break;
            case MC_SEQ_CAS_TIMING:
                for (k = 0; k < table->num_entries; k++) {
                    if (table->mc_reg_table_entry[k].mclk_max == 125000)
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
                            0x000C0140;
                    else if (table->mc_reg_table_entry[k].mclk_max == 137500)
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
                            0x000C0150;
                }
                break;
            case MC_SEQ_MISC_TIMING:
                for (k = 0; k < table->num_entries; k++) {
                    if (table->mc_reg_table_entry[k].mclk_max == 125000)
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
                            0x00000030;
                    else if (table->mc_reg_table_entry[k].mclk_max == 137500)
                        table->mc_reg_table_entry[k].mc_data[i] =
                            (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
                            0x00000035;
                }
                break;
            default:
                break;
            }
        }

        /* set bit 16 of MC_SEQ_IO_DEBUG word 3 (and clear bits 17-18) */
        WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
        tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
        tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
        WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
        WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
    }

    return 0;
}
4586 
/*
 * Build the driver's MC register table: seed every LP shadow register
 * from its live MC register, read the VBIOS MC register table for the
 * installed memory module, copy it into pi->mc_reg_table, resolve the
 * LP (s0) indices, apply board-specific patches, append the special
 * command register columns and compute which columns actually vary.
 *
 * Returns 0 on success or a negative error code; the temporary ATOM
 * table is freed on all paths.
 */
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct atom_mc_reg_table *table;
    struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
    u8 module_index = rv770_get_memory_module_index(rdev);
    int ret;

    table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
    if (!table)
        return -ENOMEM;

    /* seed each LP shadow register from the corresponding live value */
    WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
    WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
    WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
    WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
    WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
    WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
    WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
    WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
    WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
    WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
    WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
    WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
    WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
    WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
    WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
    WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
    WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
    WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
    WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
    WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

    ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
    if (ret)
        goto init_mc_done;

    ret = ci_copy_vbios_mc_reg_table(table, ci_table);
    if (ret)
        goto init_mc_done;

    ci_set_s0_mc_reg_index(ci_table);

    ret = ci_register_patching_mc_seq(rdev, ci_table);
    if (ret)
        goto init_mc_done;

    ret = ci_set_mc_special_registers(rdev, ci_table);
    if (ret)
        goto init_mc_done;

    ci_set_valid_flag(ci_table);

init_mc_done:
    kfree(table);

    return ret;
}
4645 
4646 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4647                     SMU7_Discrete_MCRegisters *mc_reg_table)
4648 {
4649     struct ci_power_info *pi = ci_get_pi(rdev);
4650     u32 i, j;
4651 
4652     for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4653         if (pi->mc_reg_table.valid_flag & (1 << j)) {
4654             if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4655                 return -EINVAL;
4656             mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4657             mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4658             i++;
4659         }
4660     }
4661 
4662     mc_reg_table->last = (u8)i;
4663 
4664     return 0;
4665 }
4666 
4667 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4668                     SMU7_Discrete_MCRegisterSet *data,
4669                     u32 num_entries, u32 valid_flag)
4670 {
4671     u32 i, j;
4672 
4673     for (i = 0, j = 0; j < num_entries; j++) {
4674         if (valid_flag & (1 << j)) {
4675             data->value[i] = cpu_to_be32(entry->mc_data[j]);
4676             i++;
4677         }
4678     }
4679 }
4680 
4681 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4682                          const u32 memory_clock,
4683                          SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4684 {
4685     struct ci_power_info *pi = ci_get_pi(rdev);
4686     u32 i = 0;
4687 
4688     for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4689         if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4690             break;
4691     }
4692 
4693     if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4694         --i;
4695 
4696     ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4697                 mc_reg_table_data, pi->mc_reg_table.last,
4698                 pi->mc_reg_table.valid_flag);
4699 }
4700 
4701 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4702                        SMU7_Discrete_MCRegisters *mc_reg_table)
4703 {
4704     struct ci_power_info *pi = ci_get_pi(rdev);
4705     u32 i;
4706 
4707     for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4708         ci_convert_mc_reg_table_entry_to_smc(rdev,
4709                              pi->dpm_table.mclk_table.dpm_levels[i].value,
4710                              &mc_reg_table->data[i]);
4711 }
4712 
/*
 * Build the full SMU7_Discrete_MCRegisters image (the compacted
 * address list plus one register set per MCLK level) and upload it to
 * SMC RAM at pi->mc_reg_table_start.  Returns 0 or a negative error
 * code from address population / SMC copy.
 */
static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    int ret;

    memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

    ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
    if (ret)
        return ret;
    ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

    return ci_copy_bytes_to_smc(rdev,
                    pi->mc_reg_table_start,
                    (u8 *)&pi->smc_mc_reg_table,
                    sizeof(SMU7_Discrete_MCRegisters),
                    pi->sram_end);
}
4731 
/*
 * Re-upload only the per-level MC register data (data[]) after an
 * overdrive MCLK change; a no-op unless DPMTABLE_OD_UPDATE_MCLK is
 * pending.  The address list portion of the SMC table is not
 * re-uploaded here.
 */
static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);

    if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
        return 0;

    memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

    ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

    /* copy only the data[] portion, starting at its SMC offset */
    return ci_copy_bytes_to_smc(rdev,
                    pi->mc_reg_table_start +
                    offsetof(SMU7_Discrete_MCRegisters, data[0]),
                    (u8 *)&pi->smc_mc_reg_table.data[0],
                    sizeof(SMU7_Discrete_MCRegisterSet) *
                    pi->dpm_table.mclk_table.count,
                    pi->sram_end);
}
4751 
4752 static void ci_enable_voltage_control(struct radeon_device *rdev)
4753 {
4754     u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4755 
4756     tmp |= VOLT_PWRMGT_EN;
4757     WREG32_SMC(GENERAL_PWRMGT, tmp);
4758 }
4759 
4760 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4761                               struct radeon_ps *radeon_state)
4762 {
4763     struct ci_ps *state = ci_get_ps(radeon_state);
4764     int i;
4765     u16 pcie_speed, max_speed = 0;
4766 
4767     for (i = 0; i < state->performance_level_count; i++) {
4768         pcie_speed = state->performance_levels[i].pcie_gen;
4769         if (max_speed < pcie_speed)
4770             max_speed = pcie_speed;
4771     }
4772 
4773     return max_speed;
4774 }
4775 
4776 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4777 {
4778     u32 speed_cntl = 0;
4779 
4780     speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4781     speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4782 
4783     return (u16)speed_cntl;
4784 }
4785 
/*
 * Decode the current PCIe link width from PCIE_LC_LINK_WIDTH_CNTL into
 * a lane count.  x12 is decoded but marked unsupported; x0, x16 and
 * any unknown encoding are reported as 16 lanes.
 */
static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
{
    u32 link_width = 0;

    link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
    link_width >>= LC_LINK_WIDTH_RD_SHIFT;

    switch (link_width) {
    case RADEON_PCIE_LC_LINK_WIDTH_X1:
        return 1;
    case RADEON_PCIE_LC_LINK_WIDTH_X2:
        return 2;
    case RADEON_PCIE_LC_LINK_WIDTH_X4:
        return 4;
    case RADEON_PCIE_LC_LINK_WIDTH_X8:
        return 8;
    case RADEON_PCIE_LC_LINK_WIDTH_X12:
        /* not actually supported */
        return 12;
    case RADEON_PCIE_LC_LINK_WIDTH_X0:
    case RADEON_PCIE_LC_LINK_WIDTH_X16:
    default:
        return 16;
    }
}
4811 
/*
 * Before a state change, ask the platform (via the ACPI performance
 * request) for a higher PCIe link speed when the new state needs one.
 * If a gen is refused, pi->force_pcie_gen is set to the speed to fall
 * back to.  When the new state needs a *lower* speed instead, only
 * pi->pspp_notify_required is set, so the request is issued after the
 * switch by ci_notify_link_speed_change_after_state_change().
 */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
                                 struct radeon_ps *radeon_new_state,
                                 struct radeon_ps *radeon_current_state)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    enum radeon_pcie_gen target_link_speed =
        ci_get_maximum_link_speed(rdev, radeon_new_state);
    enum radeon_pcie_gen current_link_speed;

    if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
        current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
    else
        current_link_speed = pi->force_pcie_gen;

    pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
    pi->pspp_notify_required = false;
    if (target_link_speed > current_link_speed) {
        switch (target_link_speed) {
#ifdef CONFIG_ACPI
        case RADEON_PCIE_GEN3:
            /* zero return means the gen3 request was accepted */
            if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
                break;
            /* gen3 refused: fall back to gen2, requesting it below
             * unless we are already at gen2 */
            pi->force_pcie_gen = RADEON_PCIE_GEN2;
            if (current_link_speed == RADEON_PCIE_GEN2)
                break;
            fallthrough;
        case RADEON_PCIE_GEN2:
            if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
                break;
            fallthrough;
#endif
        default:
            /* no ACPI or every request refused: keep current speed */
            pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
            break;
        }
    } else {
        if (target_link_speed < current_link_speed)
            pi->pspp_notify_required = true;
    }
}
4852 
/*
 * After a state change, issue the deferred (lower-speed) ACPI PCIe
 * performance request flagged by
 * ci_request_link_speed_change_before_state_change().
 */
static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
                               struct radeon_ps *radeon_new_state,
                               struct radeon_ps *radeon_current_state)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    enum radeon_pcie_gen target_link_speed =
        ci_get_maximum_link_speed(rdev, radeon_new_state);
    u8 request;

    if (pi->pspp_notify_required) {
        if (target_link_speed == RADEON_PCIE_GEN3)
            request = PCIE_PERF_REQ_PECI_GEN3;
        else if (target_link_speed == RADEON_PCIE_GEN2)
            request = PCIE_PERF_REQ_PECI_GEN2;
        else
            request = PCIE_PERF_REQ_PECI_GEN1;

        /* skip the gen1 request while the link reports a non-zero
         * speed code (presumably already above gen1 - confirm) */
        if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
            (ci_get_current_pcie_speed(rdev) > 0))
            return;

#ifdef CONFIG_ACPI
        radeon_acpi_pcie_performance_request(rdev, request, false);
#endif
    }
}
4879 
4880 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4881 {
4882     struct ci_power_info *pi = ci_get_pi(rdev);
4883     struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4884         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4885     struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4886         &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4887     struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4888         &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4889 
4890     if (allowed_sclk_vddc_table == NULL)
4891         return -EINVAL;
4892     if (allowed_sclk_vddc_table->count < 1)
4893         return -EINVAL;
4894     if (allowed_mclk_vddc_table == NULL)
4895         return -EINVAL;
4896     if (allowed_mclk_vddc_table->count < 1)
4897         return -EINVAL;
4898     if (allowed_mclk_vddci_table == NULL)
4899         return -EINVAL;
4900     if (allowed_mclk_vddci_table->count < 1)
4901         return -EINVAL;
4902 
4903     pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4904     pi->max_vddc_in_pp_table =
4905         allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4906 
4907     pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4908     pi->max_vddci_in_pp_table =
4909         allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4910 
4911     rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4912         allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4913     rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4914         allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4915     rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4916         allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4917     rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4918         allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4919 
4920     return 0;
4921 }
4922 
4923 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4924 {
4925     struct ci_power_info *pi = ci_get_pi(rdev);
4926     struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4927     u32 leakage_index;
4928 
4929     for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4930         if (leakage_table->leakage_id[leakage_index] == *vddc) {
4931             *vddc = leakage_table->actual_voltage[leakage_index];
4932             break;
4933         }
4934     }
4935 }
4936 
4937 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4938 {
4939     struct ci_power_info *pi = ci_get_pi(rdev);
4940     struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4941     u32 leakage_index;
4942 
4943     for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4944         if (leakage_table->leakage_id[leakage_index] == *vddci) {
4945             *vddci = leakage_table->actual_voltage[leakage_index];
4946             break;
4947         }
4948     }
4949 }
4950 
4951 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4952                                       struct radeon_clock_voltage_dependency_table *table)
4953 {
4954     u32 i;
4955 
4956     if (table) {
4957         for (i = 0; i < table->count; i++)
4958             ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4959     }
4960 }
4961 
4962 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4963                                        struct radeon_clock_voltage_dependency_table *table)
4964 {
4965     u32 i;
4966 
4967     if (table) {
4968         for (i = 0; i < table->count; i++)
4969             ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4970     }
4971 }
4972 
4973 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4974                                       struct radeon_vce_clock_voltage_dependency_table *table)
4975 {
4976     u32 i;
4977 
4978     if (table) {
4979         for (i = 0; i < table->count; i++)
4980             ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4981     }
4982 }
4983 
4984 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4985                                       struct radeon_uvd_clock_voltage_dependency_table *table)
4986 {
4987     u32 i;
4988 
4989     if (table) {
4990         for (i = 0; i < table->count; i++)
4991             ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4992     }
4993 }
4994 
4995 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4996                                    struct radeon_phase_shedding_limits_table *table)
4997 {
4998     u32 i;
4999 
5000     if (table) {
5001         for (i = 0; i < table->count; i++)
5002             ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
5003     }
5004 }
5005 
5006 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
5007                                 struct radeon_clock_and_voltage_limits *table)
5008 {
5009     if (table) {
5010         ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
5011         ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
5012     }
5013 }
5014 
5015 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
5016                              struct radeon_cac_leakage_table *table)
5017 {
5018     u32 i;
5019 
5020     if (table) {
5021         for (i = 0; i < table->count; i++)
5022             ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
5023     }
5024 }
5025 
/*
 * Walk every voltage-bearing table in dyn_state and replace leakage
 * voltage ids with real voltages, using the leakage tables gathered
 * earlier (see ci_get_leakage_voltages() at the call site in ci_dpm_init).
 */
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{

    /* clock/voltage dependency tables (vddc-based) */
    ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
    ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
    ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
    /* vddci-based table */
    ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
                                   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
    /* engine-specific tables */
    ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
    ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
    ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
    ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
                                  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
    /* limits and leakage tables */
    ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
                                   &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
    ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
                            &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
    ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
                            &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
    ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
                             &rdev->pm.dpm.dyn_state.cac_leakage_table);

}
5055 
5056 static void ci_get_memory_type(struct radeon_device *rdev)
5057 {
5058     struct ci_power_info *pi = ci_get_pi(rdev);
5059     u32 tmp;
5060 
5061     tmp = RREG32(MC_SEQ_MISC0);
5062 
5063     if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5064         MC_SEQ_MISC0_GDDR5_VALUE)
5065         pi->mem_gddr5 = true;
5066     else
5067         pi->mem_gddr5 = false;
5068 
5069 }
5070 
5071 static void ci_update_current_ps(struct radeon_device *rdev,
5072                  struct radeon_ps *rps)
5073 {
5074     struct ci_ps *new_ps = ci_get_ps(rps);
5075     struct ci_power_info *pi = ci_get_pi(rdev);
5076 
5077     pi->current_rps = *rps;
5078     pi->current_ps = *new_ps;
5079     pi->current_rps.ps_priv = &pi->current_ps;
5080 }
5081 
5082 static void ci_update_requested_ps(struct radeon_device *rdev,
5083                    struct radeon_ps *rps)
5084 {
5085     struct ci_ps *new_ps = ci_get_ps(rps);
5086     struct ci_power_info *pi = ci_get_pi(rdev);
5087 
5088     pi->requested_rps = *rps;
5089     pi->requested_ps = *new_ps;
5090     pi->requested_rps.ps_priv = &pi->requested_ps;
5091 }
5092 
5093 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5094 {
5095     struct ci_power_info *pi = ci_get_pi(rdev);
5096     struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5097     struct radeon_ps *new_ps = &requested_ps;
5098 
5099     ci_update_requested_ps(rdev, new_ps);
5100 
5101     ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5102 
5103     return 0;
5104 }
5105 
5106 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5107 {
5108     struct ci_power_info *pi = ci_get_pi(rdev);
5109     struct radeon_ps *new_ps = &pi->requested_rps;
5110 
5111     ci_update_current_ps(rdev, new_ps);
5112 }
5113 
5114 
/*
 * One-time ASIC-side setup performed before DPM is enabled: load the MC
 * microcode, snapshot clock registers, detect the vram type, and enable
 * ACPI power management and the sclk_t defaults.
 */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
    int r;

    r = ci_mc_load_microcode(rdev);
    if (r)
        /* Non-fatal: log and continue with the rest of the setup. */
        DRM_ERROR("Failed to load MC firmware!\n");
    ci_read_clock_registers(rdev);
    ci_get_memory_type(rdev);
    ci_enable_acpi_power_management(rdev);
    ci_init_sclk_t(rdev);
}
5127 
/*
 * Bring up dynamic power management on the SMC.
 *
 * The sequence is strictly ordered: voltage/MC tables are built first,
 * firmware is uploaded and its header parsed, the SMC tables are
 * initialized, and only then is the SMC started and the individual DPM
 * features (ULV, deep sleep, DIDT, CAC, power containment, ...) switched
 * on.  Any failure aborts with the callee's error code.
 *
 * Returns 0 on success, -EINVAL if the SMC is already running, or a
 * negative error code from the failing step.
 */
int ci_dpm_enable(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
    int ret;

    /* Refuse to double-enable: the SMC must be stopped first. */
    if (ci_is_smc_running(rdev))
        return -EINVAL;
    if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
        ci_enable_voltage_control(rdev);
        ret = ci_construct_voltage_tables(rdev);
        if (ret) {
            DRM_ERROR("ci_construct_voltage_tables failed\n");
            return ret;
        }
    }
    if (pi->caps_dynamic_ac_timing) {
        ret = ci_initialize_mc_reg_table(rdev);
        if (ret)
            /* Degrade gracefully to static AC timing. */
            pi->caps_dynamic_ac_timing = false;
    }
    if (pi->dynamic_ss)
        ci_enable_spread_spectrum(rdev, true);
    if (pi->thermal_protection)
        ci_enable_thermal_protection(rdev, true);
    ci_program_sstp(rdev);
    ci_enable_display_gap(rdev);
    ci_program_vc(rdev);
    /* Load the SMC firmware and set up its tables before starting it. */
    ret = ci_upload_firmware(rdev);
    if (ret) {
        DRM_ERROR("ci_upload_firmware failed\n");
        return ret;
    }
    ret = ci_process_firmware_header(rdev);
    if (ret) {
        DRM_ERROR("ci_process_firmware_header failed\n");
        return ret;
    }
    ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
    if (ret) {
        DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
        return ret;
    }
    ret = ci_init_smc_table(rdev);
    if (ret) {
        DRM_ERROR("ci_init_smc_table failed\n");
        return ret;
    }
    ret = ci_init_arb_table_index(rdev);
    if (ret) {
        DRM_ERROR("ci_init_arb_table_index failed\n");
        return ret;
    }
    if (pi->caps_dynamic_ac_timing) {
        ret = ci_populate_initial_mc_reg_table(rdev);
        if (ret) {
            DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
            return ret;
        }
    }
    ret = ci_populate_pm_base(rdev);
    if (ret) {
        DRM_ERROR("ci_populate_pm_base failed\n");
        return ret;
    }
    /* Tables are in place: start the SMC and enable DPM features. */
    ci_dpm_start_smc(rdev);
    ci_enable_vr_hot_gpio_interrupt(rdev);
    ret = ci_notify_smc_display_change(rdev, false);
    if (ret) {
        DRM_ERROR("ci_notify_smc_display_change failed\n");
        return ret;
    }
    ci_enable_sclk_control(rdev, true);
    ret = ci_enable_ulv(rdev, true);
    if (ret) {
        DRM_ERROR("ci_enable_ulv failed\n");
        return ret;
    }
    ret = ci_enable_ds_master_switch(rdev, true);
    if (ret) {
        DRM_ERROR("ci_enable_ds_master_switch failed\n");
        return ret;
    }
    ret = ci_start_dpm(rdev);
    if (ret) {
        DRM_ERROR("ci_start_dpm failed\n");
        return ret;
    }
    ret = ci_enable_didt(rdev, true);
    if (ret) {
        DRM_ERROR("ci_enable_didt failed\n");
        return ret;
    }
    ret = ci_enable_smc_cac(rdev, true);
    if (ret) {
        DRM_ERROR("ci_enable_smc_cac failed\n");
        return ret;
    }
    ret = ci_enable_power_containment(rdev, true);
    if (ret) {
        DRM_ERROR("ci_enable_power_containment failed\n");
        return ret;
    }

    ret = ci_power_control_set_level(rdev);
    if (ret) {
        DRM_ERROR("ci_power_control_set_level failed\n");
        return ret;
    }

    ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

    ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
    if (ret) {
        DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
        return ret;
    }

    ci_thermal_start_thermal_controller(rdev);

    /* The boot state is what the hardware is running right now. */
    ci_update_current_ps(rdev, boot_ps);

    return 0;
}
5252 
5253 static int ci_set_temperature_range(struct radeon_device *rdev)
5254 {
5255     int ret;
5256 
5257     ret = ci_thermal_enable_alert(rdev, false);
5258     if (ret)
5259         return ret;
5260     ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5261     if (ret)
5262         return ret;
5263     ret = ci_thermal_enable_alert(rdev, true);
5264     if (ret)
5265         return ret;
5266 
5267     return ret;
5268 }
5269 
5270 int ci_dpm_late_enable(struct radeon_device *rdev)
5271 {
5272     int ret;
5273 
5274     ret = ci_set_temperature_range(rdev);
5275     if (ret)
5276         return ret;
5277 
5278     ci_dpm_powergate_uvd(rdev, true);
5279 
5280     return 0;
5281 }
5282 
/*
 * Tear down DPM.  Features are disabled roughly in the reverse order of
 * ci_dpm_enable(), the hardware is reset to defaults, the SMC is stopped,
 * and the cached current state falls back to the boot state.
 */
void ci_dpm_disable(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

    ci_dpm_powergate_uvd(rdev, false);

    /* Nothing to tear down if the SMC was never started. */
    if (!ci_is_smc_running(rdev))
        return;

    ci_thermal_stop_thermal_controller(rdev);

    if (pi->thermal_protection)
        ci_enable_thermal_protection(rdev, false);
    ci_enable_power_containment(rdev, false);
    ci_enable_smc_cac(rdev, false);
    ci_enable_didt(rdev, false);
    ci_enable_spread_spectrum(rdev, false);
    ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
    ci_stop_dpm(rdev);
    ci_enable_ds_master_switch(rdev, false);
    ci_enable_ulv(rdev, false);
    ci_clear_vc(rdev);
    ci_reset_to_default(rdev);
    ci_dpm_stop_smc(rdev);
    ci_force_switch_to_arb_f0(rdev);
    ci_enable_thermal_based_sclk_dpm(rdev, false);

    ci_update_current_ps(rdev, boot_ps);
}
5313 
/*
 * Switch the hardware from the current to the requested power state.
 *
 * The protocol is order-sensitive: the sclk/mclk DPM state machines are
 * frozen, the new level tables and enable masks are uploaded, VCE DPM and
 * MC registers are updated, and the state machines are unfrozen before
 * the new enable mask is committed.  PCIe link-speed changes are requested
 * before and acknowledged after the switch when supported.
 *
 * Returns 0 on success or the failing step's error code (the hardware may
 * be left mid-transition in that case).
 */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct radeon_ps *new_ps = &pi->requested_rps;
    struct radeon_ps *old_ps = &pi->current_rps;
    int ret;

    ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
    if (pi->pcie_performance_request)
        ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
    /* Freeze the DPM state machines while the tables are rewritten. */
    ret = ci_freeze_sclk_mclk_dpm(rdev);
    if (ret) {
        DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
        return ret;
    }
    ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
    if (ret) {
        DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
        return ret;
    }
    ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
    if (ret) {
        DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
        return ret;
    }

    ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
    if (ret) {
        DRM_ERROR("ci_update_vce_dpm failed\n");
        return ret;
    }

    ret = ci_update_sclk_t(rdev);
    if (ret) {
        DRM_ERROR("ci_update_sclk_t failed\n");
        return ret;
    }
    if (pi->caps_dynamic_ac_timing) {
        ret = ci_update_and_upload_mc_reg_table(rdev);
        if (ret) {
            DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
            return ret;
        }
    }
    ret = ci_program_memory_timing_parameters(rdev);
    if (ret) {
        DRM_ERROR("ci_program_memory_timing_parameters failed\n");
        return ret;
    }
    /* Unfreeze, then commit the new level enable mask. */
    ret = ci_unfreeze_sclk_mclk_dpm(rdev);
    if (ret) {
        DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
        return ret;
    }
    ret = ci_upload_dpm_level_enable_mask(rdev);
    if (ret) {
        DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
        return ret;
    }
    if (pi->pcie_performance_request)
        ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

    return 0;
}
5378 
5379 #if 0
5380 void ci_dpm_reset_asic(struct radeon_device *rdev)
5381 {
5382     ci_set_boot_state(rdev);
5383 }
5384 #endif
5385 
/* Display-configuration-changed hook: reprogram the display gap. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
    ci_program_display_gap(rdev);
}
5390 
/* Overlay of every powerplay table revision that may appear in the vbios. */
union power_info {
    struct _ATOM_POWERPLAY_INFO info;
    struct _ATOM_POWERPLAY_INFO_V2 info_2;
    struct _ATOM_POWERPLAY_INFO_V3 info_3;
    struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
    struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
    struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-ASIC-family clock info layouts; this driver reads the .ci member. */
union pplib_clock_info {
    struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
    struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
    struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
    struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
    struct _ATOM_PPLIB_SI_CLOCK_INFO si;
    struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

/* Power state layouts; the parser below uses the .v2 member. */
union pplib_power_state {
    struct _ATOM_PPLIB_STATE v1;
    struct _ATOM_PPLIB_STATE_V2 v2;
};
5413 
/*
 * Fill the generic (non-clock) fields of a radeon_ps from the vbios
 * non-clock info record: capability flags, classification, and (for newer
 * table revisions) the UVD vclk/dclk.  Also registers the state as the
 * boot and/or UVD state when the classification flags say so.
 */
static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
                      struct radeon_ps *rps,
                      struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
                      u8 table_rev)
{
    rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
    rps->class = le16_to_cpu(non_clock_info->usClassification);
    rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

    /* UVD clocks only exist in revisions newer than VER1. */
    if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
        rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
        rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
    } else {
        rps->vclk = 0;
        rps->dclk = 0;
    }

    if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
        rdev->pm.dpm.boot_ps = rps;
    if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
        rdev->pm.dpm.uvd_ps = rps;
}
5436 
/*
 * Fill performance level @index of @rps from a vbios CI clock info record.
 *
 * Decodes the split low/high clock fields, resolves the supported PCIe
 * gen/lane settings against the system mask and vbios boot values, records
 * ACPI and ULV states, patches the boot state with the vbios bootup
 * values, and tracks min/max PCIe settings per UI class (battery vs
 * performance).
 */
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
                      struct radeon_ps *rps, int index,
                      union pplib_clock_info *clock_info)
{
    struct ci_power_info *pi = ci_get_pi(rdev);
    struct ci_ps *ps = ci_get_ps(rps);
    struct ci_pl *pl = &ps->performance_levels[index];

    ps->performance_level_count = index + 1;

    /* Clocks are stored as a 16-bit low word plus an 8-bit high byte. */
    pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
    pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
    pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
    pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

    pl->pcie_gen = r600_get_pcie_gen_support(rdev,
                         pi->sys_pcie_mask,
                         pi->vbios_boot_state.pcie_gen_bootup_value,
                         clock_info->ci.ucPCIEGen);
    pl->pcie_lane = r600_get_pcie_lane_support(rdev,
                           pi->vbios_boot_state.pcie_lane_bootup_value,
                           le16_to_cpu(clock_info->ci.usPCIELane));

    if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
        pi->acpi_pcie_gen = pl->pcie_gen;
    }

    if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
        pi->ulv.supported = true;
        pi->ulv.pl = *pl;
        pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
    }

    /* patch up boot state */
    if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
        pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
        pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
        pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
        pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
    }

    /* Widen the per-class PCIe gen/lane ranges to cover this level. */
    switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
    case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
        pi->use_pcie_powersaving_levels = true;
        if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
            pi->pcie_gen_powersaving.max = pl->pcie_gen;
        if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
            pi->pcie_gen_powersaving.min = pl->pcie_gen;
        if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
            pi->pcie_lane_powersaving.max = pl->pcie_lane;
        if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
            pi->pcie_lane_powersaving.min = pl->pcie_lane;
        break;
    case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
        pi->use_pcie_performance_levels = true;
        if (pi->pcie_gen_performance.max < pl->pcie_gen)
            pi->pcie_gen_performance.max = pl->pcie_gen;
        if (pi->pcie_gen_performance.min > pl->pcie_gen)
            pi->pcie_gen_performance.min = pl->pcie_gen;
        if (pi->pcie_lane_performance.max < pl->pcie_lane)
            pi->pcie_lane_performance.max = pl->pcie_lane;
        if (pi->pcie_lane_performance.min > pl->pcie_lane)
            pi->pcie_lane_performance.min = pl->pcie_lane;
        break;
    default:
        break;
    }
}
5505 
/*
 * Parse the vbios PowerPlayInfo table into rdev->pm.dpm.ps.
 *
 * Walks the state array, allocating one ci_ps per state and decoding each
 * state's DPM levels via ci_parse_pplib_clock_info(), then fills in the
 * VCE state clocks from the clock info array.
 *
 * NOTE(review): on mid-loop failure this returns without freeing the
 * allocations made so far; num_ps is updated each iteration, so cleanup
 * presumably relies on the caller invoking ci_dpm_fini() -- verify against
 * ci_dpm_init()'s error handling.
 *
 * Returns 0 on success, -EINVAL on a malformed table, -ENOMEM on
 * allocation failure.
 */
static int ci_parse_power_table(struct radeon_device *rdev)
{
    struct radeon_mode_info *mode_info = &rdev->mode_info;
    struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
    union pplib_power_state *power_state;
    int i, j, k, non_clock_array_index, clock_array_index;
    union pplib_clock_info *clock_info;
    struct _StateArray *state_array;
    struct _ClockInfoArray *clock_info_array;
    struct _NonClockInfoArray *non_clock_info_array;
    union power_info *power_info;
    int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
    u16 data_offset;
    u8 frev, crev;
    u8 *power_state_offset;
    struct ci_ps *ps;

    if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
                   &frev, &crev, &data_offset))
        return -EINVAL;
    power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

    /* All three sub-arrays are located by offsets relative to data_offset. */
    state_array = (struct _StateArray *)
        (mode_info->atom_context->bios + data_offset +
         le16_to_cpu(power_info->pplib.usStateArrayOffset));
    clock_info_array = (struct _ClockInfoArray *)
        (mode_info->atom_context->bios + data_offset +
         le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
    non_clock_info_array = (struct _NonClockInfoArray *)
        (mode_info->atom_context->bios + data_offset +
         le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

    rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
                  sizeof(struct radeon_ps),
                  GFP_KERNEL);
    if (!rdev->pm.dpm.ps)
        return -ENOMEM;
    power_state_offset = (u8 *)state_array->states;
    rdev->pm.dpm.num_ps = 0;
    for (i = 0; i < state_array->ucNumEntries; i++) {
        u8 *idx;
        power_state = (union pplib_power_state *)power_state_offset;
        non_clock_array_index = power_state->v2.nonClockInfoIndex;
        non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
            &non_clock_info_array->nonClockInfo[non_clock_array_index];
        if (!rdev->pm.power_state[i].clock_info)
            return -EINVAL;
        ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
        if (ps == NULL)
            return -ENOMEM;
        rdev->pm.dpm.ps[i].ps_priv = ps;
        ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
                          non_clock_info,
                          non_clock_info_array->ucEntrySize);
        k = 0;
        idx = (u8 *)&power_state->v2.clockInfoIndex[0];
        for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
            clock_array_index = idx[j];
            /* Skip indices beyond the clock info array. */
            if (clock_array_index >= clock_info_array->ucNumEntries)
                continue;
            /* Cap at the driver's hardware power level limit. */
            if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
                break;
            clock_info = (union pplib_clock_info *)
                ((u8 *)&clock_info_array->clockInfo[0] +
                 (clock_array_index * clock_info_array->ucEntrySize));
            ci_parse_pplib_clock_info(rdev,
                          &rdev->pm.dpm.ps[i], k,
                          clock_info);
            k++;
        }
        /* Advance past this variable-length v2 state record. */
        power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
        /* Keep num_ps current so partial parses can be cleaned up. */
        rdev->pm.dpm.num_ps = i + 1;
    }

    /* fill in the vce power states */
    for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
        u32 sclk, mclk;
        clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
        clock_info = (union pplib_clock_info *)
            &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
        sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
        sclk |= clock_info->ci.ucEngineClockHigh << 16;
        mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
        mclk |= clock_info->ci.ucMemoryClockHigh << 16;
        rdev->pm.dpm.vce_states[i].sclk = sclk;
        rdev->pm.dpm.vce_states[i].mclk = mclk;
    }

    return 0;
}
5596 
5597 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5598                     struct ci_vbios_boot_state *boot_state)
5599 {
5600     struct radeon_mode_info *mode_info = &rdev->mode_info;
5601     int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5602     ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5603     u8 frev, crev;
5604     u16 data_offset;
5605 
5606     if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5607                    &frev, &crev, &data_offset)) {
5608         firmware_info =
5609             (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5610                             data_offset);
5611         boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5612         boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5613         boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5614         boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5615         boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5616         boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5617         boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5618 
5619         return 0;
5620     }
5621     return -EINVAL;
5622 }
5623 
5624 void ci_dpm_fini(struct radeon_device *rdev)
5625 {
5626     int i;
5627 
5628     for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5629         kfree(rdev->pm.dpm.ps[i].ps_priv);
5630     }
5631     kfree(rdev->pm.dpm.ps);
5632     kfree(rdev->pm.dpm.priv);
5633     kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5634     r600_free_extended_power_table(rdev);
5635 }
5636 
5637 int ci_dpm_init(struct radeon_device *rdev)
5638 {
5639     int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5640     SMU7_Discrete_DpmTable  *dpm_table;
5641     struct radeon_gpio_rec gpio;
5642     u16 data_offset, size;
5643     u8 frev, crev;
5644     struct ci_power_info *pi;
5645     enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
5646     struct pci_dev *root = rdev->pdev->bus->self;
5647     int ret;
5648 
5649     pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5650     if (pi == NULL)
5651         return -ENOMEM;
5652     rdev->pm.dpm.priv = pi;
5653 
5654     if (!pci_is_root_bus(rdev->pdev->bus))
5655         speed_cap = pcie_get_speed_cap(root);
5656     if (speed_cap == PCI_SPEED_UNKNOWN) {
5657         pi->sys_pcie_mask = 0;
5658     } else {
5659         if (speed_cap == PCIE_SPEED_8_0GT)
5660             pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5661                 RADEON_PCIE_SPEED_50 |
5662                 RADEON_PCIE_SPEED_80;
5663         else if (speed_cap == PCIE_SPEED_5_0GT)
5664             pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5665                 RADEON_PCIE_SPEED_50;
5666         else
5667             pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
5668     }
5669     pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5670 
5671     pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5672     pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5673     pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5674     pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5675 
5676     pi->pcie_lane_performance.max = 0;
5677     pi->pcie_lane_performance.min = 16;
5678     pi->pcie_lane_powersaving.max = 0;
5679     pi->pcie_lane_powersaving.min = 16;
5680 
5681     ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5682     if (ret) {
5683         ci_dpm_fini(rdev);
5684         return ret;
5685     }
5686 
5687     ret = r600_get_platform_caps(rdev);
5688     if (ret) {
5689         ci_dpm_fini(rdev);
5690         return ret;
5691     }
5692 
5693     ret = r600_parse_extended_power_table(rdev);
5694     if (ret) {
5695         ci_dpm_fini(rdev);
5696         return ret;
5697     }
5698 
5699     ret = ci_parse_power_table(rdev);
5700     if (ret) {
5701         ci_dpm_fini(rdev);
5702         return ret;
5703     }
5704 
5705     pi->dll_default_on = false;
5706     pi->sram_end = SMC_RAM_END;
5707 
5708     pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5709     pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5710     pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5711     pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5712     pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5713     pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5714     pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5715     pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5716 
5717     pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5718 
5719     pi->sclk_dpm_key_disabled = 0;
5720     pi->mclk_dpm_key_disabled = 0;
5721     pi->pcie_dpm_key_disabled = 0;
5722     pi->thermal_sclk_dpm_enabled = 0;
5723 
5724     /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5725     if ((rdev->pdev->device == 0x6658) &&
5726         (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5727         pi->mclk_dpm_key_disabled = 1;
5728     }
5729 
5730     pi->caps_sclk_ds = true;
5731 
5732     pi->mclk_strobe_mode_threshold = 40000;
5733     pi->mclk_stutter_mode_threshold = 40000;
5734     pi->mclk_edc_enable_threshold = 40000;
5735     pi->mclk_edc_wr_enable_threshold = 40000;
5736 
5737     ci_initialize_powertune_defaults(rdev);
5738 
5739     pi->caps_fps = false;
5740 
5741     pi->caps_sclk_throttle_low_notification = false;
5742 
5743     pi->caps_uvd_dpm = true;
5744     pi->caps_vce_dpm = true;
5745 
5746     ci_get_leakage_voltages(rdev);
5747     ci_patch_dependency_tables_with_leakage(rdev);
5748     ci_set_private_data_variables_based_on_pptable(rdev);
5749 
5750     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5751         kcalloc(4,
5752             sizeof(struct radeon_clock_voltage_dependency_entry),
5753             GFP_KERNEL);
5754     if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5755         ci_dpm_fini(rdev);
5756         return -ENOMEM;
5757     }
5758     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5759     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5760     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5761     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5762     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5763     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5764     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5765     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5766     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5767 
5768     rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5769     rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5770     rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5771 
5772     rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5773     rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5774     rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5775     rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5776 
5777     if (rdev->family == CHIP_HAWAII) {
5778         pi->thermal_temp_setting.temperature_low = 94500;
5779         pi->thermal_temp_setting.temperature_high = 95000;
5780         pi->thermal_temp_setting.temperature_shutdown = 104000;
5781     } else {
5782         pi->thermal_temp_setting.temperature_low = 99500;
5783         pi->thermal_temp_setting.temperature_high = 100000;
5784         pi->thermal_temp_setting.temperature_shutdown = 104000;
5785     }
5786 
5787     pi->uvd_enabled = false;
5788 
5789     dpm_table = &pi->smc_state_table;
5790 
5791     gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5792     if (gpio.valid) {
5793         dpm_table->VRHotGpio = gpio.shift;
5794         rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5795     } else {
5796         dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5797         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5798     }
5799 
5800     gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5801     if (gpio.valid) {
5802         dpm_table->AcDcGpio = gpio.shift;
5803         rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5804     } else {
5805         dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5806         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5807     }
5808 
5809     gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5810     if (gpio.valid) {
5811         u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5812 
5813         switch (gpio.shift) {
5814         case 0:
5815             tmp &= ~GNB_SLOW_MODE_MASK;
5816             tmp |= GNB_SLOW_MODE(1);
5817             break;
5818         case 1:
5819             tmp &= ~GNB_SLOW_MODE_MASK;
5820             tmp |= GNB_SLOW_MODE(2);
5821             break;
5822         case 2:
5823             tmp |= GNB_SLOW;
5824             break;
5825         case 3:
5826             tmp |= FORCE_NB_PS1;
5827             break;
5828         case 4:
5829             tmp |= DPM_ENABLED;
5830             break;
5831         default:
5832             DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5833             break;
5834         }
5835         WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5836     }
5837 
5838     pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5839     pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5840     pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5841     if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5842         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5843     else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5844         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5845 
5846     if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5847         if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5848             pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5849         else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5850             pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5851         else
5852             rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5853     }
5854 
5855     if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5856         if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5857             pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5858         else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5859             pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5860         else
5861             rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5862     }
5863 
5864     pi->vddc_phase_shed_control = true;
5865 
5866 #if defined(CONFIG_ACPI)
5867     pi->pcie_performance_request =
5868         radeon_acpi_is_pcie_performance_request_supported(rdev);
5869 #else
5870     pi->pcie_performance_request = false;
5871 #endif
5872 
5873     if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5874                    &frev, &crev, &data_offset)) {
5875         pi->caps_sclk_ss_support = true;
5876         pi->caps_mclk_ss_support = true;
5877         pi->dynamic_ss = true;
5878     } else {
5879         pi->caps_sclk_ss_support = false;
5880         pi->caps_mclk_ss_support = false;
5881         pi->dynamic_ss = true;
5882     }
5883 
5884     if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5885         pi->thermal_protection = true;
5886     else
5887         pi->thermal_protection = false;
5888 
5889     pi->caps_dynamic_ac_timing = true;
5890 
5891     pi->uvd_power_gated = false;
5892 
5893     /* make sure dc limits are valid */
5894     if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5895         (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5896         rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5897             rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5898 
5899     pi->fan_ctrl_is_in_default_mode = true;
5900 
5901     return 0;
5902 }
5903 
5904 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5905                             struct seq_file *m)
5906 {
5907     struct ci_power_info *pi = ci_get_pi(rdev);
5908     struct radeon_ps *rps = &pi->current_rps;
5909     u32 sclk = ci_get_average_sclk_freq(rdev);
5910     u32 mclk = ci_get_average_mclk_freq(rdev);
5911 
5912     seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5913     seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5914     seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5915            sclk, mclk);
5916 }
5917 
5918 void ci_dpm_print_power_state(struct radeon_device *rdev,
5919                   struct radeon_ps *rps)
5920 {
5921     struct ci_ps *ps = ci_get_ps(rps);
5922     struct ci_pl *pl;
5923     int i;
5924 
5925     r600_dpm_print_class_info(rps->class, rps->class2);
5926     r600_dpm_print_cap_info(rps->caps);
5927     printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5928     for (i = 0; i < ps->performance_level_count; i++) {
5929         pl = &ps->performance_levels[i];
5930         printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5931                i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5932     }
5933     r600_dpm_print_ps_status(rdev, rps);
5934 }
5935 
5936 u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
5937 {
5938     u32 sclk = ci_get_average_sclk_freq(rdev);
5939 
5940     return sclk;
5941 }
5942 
5943 u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
5944 {
5945     u32 mclk = ci_get_average_mclk_freq(rdev);
5946 
5947     return mclk;
5948 }
5949 
5950 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5951 {
5952     struct ci_power_info *pi = ci_get_pi(rdev);
5953     struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5954 
5955     if (low)
5956         return requested_state->performance_levels[0].sclk;
5957     else
5958         return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5959 }
5960 
5961 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5962 {
5963     struct ci_power_info *pi = ci_get_pi(rdev);
5964     struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5965 
5966     if (low)
5967         return requested_state->performance_levels[0].mclk;
5968     else
5969         return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5970 }