0001 /*
0002  * Copyright 2013 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <linux/pci.h>
0025 #include <linux/seq_file.h>
0026 
0027 #include "cikd.h"
0028 #include "kv_dpm.h"
0029 #include "r600_dpm.h"
0030 #include "radeon.h"
0031 #include "radeon_asic.h"
0032 
0033 #define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
0034 #define KV_MINIMUM_ENGINE_CLOCK         800
0035 #define SMC_RAM_END                     0x40000
0036 
0037 static int kv_enable_nb_dpm(struct radeon_device *rdev,
0038                 bool enable);
0039 static void kv_init_graphics_levels(struct radeon_device *rdev);
0040 static int kv_calculate_ds_divider(struct radeon_device *rdev);
0041 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
0042 static int kv_calculate_dpm_settings(struct radeon_device *rdev);
0043 static void kv_enable_new_levels(struct radeon_device *rdev);
0044 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
0045                        struct radeon_ps *new_rps);
0046 static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
0047 static int kv_set_enabled_levels(struct radeon_device *rdev);
0048 static int kv_force_dpm_highest(struct radeon_device *rdev);
0049 static int kv_force_dpm_lowest(struct radeon_device *rdev);
0050 static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
0051                     struct radeon_ps *new_rps,
0052                     struct radeon_ps *old_rps);
0053 static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
0054                         int min_temp, int max_temp);
0055 static int kv_init_fps_limits(struct radeon_device *rdev);
0056 
0057 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
0058 static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
0059 static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
0060 static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
0061 
0062 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
0063 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
0064 extern void cik_update_cg(struct radeon_device *rdev,
0065               u32 block, bool enable);
0066 
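     /* Default DiDt (di/dt power throttling) register settings for Kaveri.
      * The four groups below appear to correspond to the SQ, DB, TD and TCP
      * ramping blocks: the weight registers are zeroed and each block gets
      * the same control parameters.  The 0xFFFFFFFF entry is the
      * end-of-table sentinel consumed by kv_program_pt_config_registers().
      */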
0067 static const struct kv_pt_config_reg didt_config_kv[] =
0068 {
0069     { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0070     { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0071     { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0072     { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0073     { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0074     { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0075     { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0076     { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0077     { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0078     { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0079     { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0080     { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0081     { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
0082     { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
0083     { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
0084     { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0085     { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0086     { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0087     { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0088     { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0089     { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0090     { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0091     { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0092     { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0093     { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0094     { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0095     { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0096     { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0097     { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0098     { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0099     { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
0100     { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
0101     { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
0102     { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0103     { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0104     { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0105     { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0106     { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0107     { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0108     { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0109     { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0110     { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0111     { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0112     { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0113     { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0114     { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0115     { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0116     { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0117     { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
0118     { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
0119     { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
0120     { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0121     { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0122     { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0123     { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0124     { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0125     { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0126     { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0127     { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0128     { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0129     { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0130     { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0131     { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0132     { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
0133     { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
0134     { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
0135     { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
0136     { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
0137     { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
0138     { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0139     { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
0140     { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
0141     { 0xFFFFFFFF }
0142 };
0143 
0144 static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
0145 {
0146     struct kv_ps *ps = rps->ps_priv;
0147 
0148     return ps;
0149 }
0150 
0151 static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
0152 {
0153     struct kv_power_info *pi = rdev->pm.dpm.priv;
0154 
0155     return pi;
0156 }
0157 
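     /* Apply a kv_pt_config_reg table: KV_CONFIGREG_CACHE entries are OR-ed
      * into a pending value, which is folded into the next non-cache entry
      * via a read-modify-write of the SMC-indexed, DIDT-indexed or plain
      * MMIO register it names.  The table ends at offset 0xFFFFFFFF.
      */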
0158 static int kv_program_pt_config_registers(struct radeon_device *rdev,
0159                       const struct kv_pt_config_reg *cac_config_regs)
0160 {
0161     const struct kv_pt_config_reg *config_regs = cac_config_regs;
0162     u32 data;
0163     u32 cache = 0;
0164 
0165     if (config_regs == NULL)
0166         return -EINVAL;
0167 
0168     while (config_regs->offset != 0xFFFFFFFF) {
0169         if (config_regs->type == KV_CONFIGREG_CACHE) {
0170             cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
0171         } else {
0172             switch (config_regs->type) {
0173             case KV_CONFIGREG_SMC_IND:
0174                 data = RREG32_SMC(config_regs->offset);
0175                 break;
0176             case KV_CONFIGREG_DIDT_IND:
0177                 data = RREG32_DIDT(config_regs->offset);
0178                 break;
0179             default:
0180                 data = RREG32(config_regs->offset << 2);
0181                 break;
0182             }
0183 
0184             data &= ~config_regs->mask;
0185             data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
0186             data |= cache;
0187             cache = 0;
0188 
0189             switch (config_regs->type) {
0190             case KV_CONFIGREG_SMC_IND:
0191                 WREG32_SMC(config_regs->offset, data);
0192                 break;
0193             case KV_CONFIGREG_DIDT_IND:
0194                 WREG32_DIDT(config_regs->offset, data);
0195                 break;
0196             default:
0197                 WREG32(config_regs->offset << 2, data);
0198                 break;
0199             }
0200         }
0201         config_regs++;
0202     }
0203 
0204     return 0;
0205 }
0206 
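     /* Set or clear DIDT_CTRL_EN in the SQ, DB, TD and TCP DIDT control
      * registers, but only for the blocks whose ramping capability is set
      * in kv_power_info.
      */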
0207 static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
0208 {
0209     struct kv_power_info *pi = kv_get_pi(rdev);
0210     u32 data;
0211 
0212     if (pi->caps_sq_ramping) {
0213         data = RREG32_DIDT(DIDT_SQ_CTRL0);
0214         if (enable)
0215             data |= DIDT_CTRL_EN;
0216         else
0217             data &= ~DIDT_CTRL_EN;
0218         WREG32_DIDT(DIDT_SQ_CTRL0, data);
0219     }
0220 
0221     if (pi->caps_db_ramping) {
0222         data = RREG32_DIDT(DIDT_DB_CTRL0);
0223         if (enable)
0224             data |= DIDT_CTRL_EN;
0225         else
0226             data &= ~DIDT_CTRL_EN;
0227         WREG32_DIDT(DIDT_DB_CTRL0, data);
0228     }
0229 
0230     if (pi->caps_td_ramping) {
0231         data = RREG32_DIDT(DIDT_TD_CTRL0);
0232         if (enable)
0233             data |= DIDT_CTRL_EN;
0234         else
0235             data &= ~DIDT_CTRL_EN;
0236         WREG32_DIDT(DIDT_TD_CTRL0, data);
0237     }
0238 
0239     if (pi->caps_tcp_ramping) {
0240         data = RREG32_DIDT(DIDT_TCP_CTRL0);
0241         if (enable)
0242             data |= DIDT_CTRL_EN;
0243         else
0244             data &= ~DIDT_CTRL_EN;
0245         WREG32_DIDT(DIDT_TCP_CTRL0, data);
0246     }
0247 }
0248 
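     /* Enable or disable DiDt while the RLC is held in safe mode.  When
      * enabling, the didt_config_kv table is programmed first; if that
      * fails, the RLC is released and the error is returned.
      */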
0249 static int kv_enable_didt(struct radeon_device *rdev, bool enable)
0250 {
0251     struct kv_power_info *pi = kv_get_pi(rdev);
0252     int ret;
0253 
0254     if (pi->caps_sq_ramping ||
0255         pi->caps_db_ramping ||
0256         pi->caps_td_ramping ||
0257         pi->caps_tcp_ramping) {
0258         cik_enter_rlc_safe_mode(rdev);
0259 
0260         if (enable) {
0261             ret = kv_program_pt_config_registers(rdev, didt_config_kv);
0262             if (ret) {
0263                 cik_exit_rlc_safe_mode(rdev);
0264                 return ret;
0265             }
0266         }
0267 
0268         kv_do_enable_didt(rdev, enable);
0269 
0270         cik_exit_rlc_safe_mode(rdev);
0271     }
0272 
0273     return 0;
0274 }
0275 
0276 static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
0277 {
0278     struct kv_power_info *pi = kv_get_pi(rdev);
0279     int ret = 0;
0280 
0281     if (pi->caps_cac) {
0282         if (enable) {
0283             ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
0284             if (ret)
0285                 pi->cac_enabled = false;
0286             else
0287                 pi->cac_enabled = true;
0288         } else if (pi->cac_enabled) {
0289             kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
0290             pi->cac_enabled = false;
0291         }
0292     }
0293 
0294     return ret;
0295 }
0296 
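     /* Pull the DpmTable and SoftRegisters offsets out of the SMU7 firmware
      * header in SMC SRAM; the later table uploads in this file are
      * addressed relative to dpm_table_start.
      */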
0297 static int kv_process_firmware_header(struct radeon_device *rdev)
0298 {
0299     struct kv_power_info *pi = kv_get_pi(rdev);
0300     u32 tmp;
0301     int ret;
0302 
0303     ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
0304                      offsetof(SMU7_Firmware_Header, DpmTable),
0305                      &tmp, pi->sram_end);
0306 
0307     if (ret == 0)
0308         pi->dpm_table_start = tmp;
0309 
0310     ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
0311                      offsetof(SMU7_Firmware_Header, SoftRegisters),
0312                      &tmp, pi->sram_end);
0313 
0314     if (ret == 0)
0315         pi->soft_regs_start = tmp;
0316 
0317     return ret;
0318 }
0319 
0320 static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
0321 {
0322     struct kv_power_info *pi = kv_get_pi(rdev);
0323     int ret;
0324 
0325     pi->graphics_voltage_change_enable = 1;
0326 
0327     ret = kv_copy_bytes_to_smc(rdev,
0328                    pi->dpm_table_start +
0329                    offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
0330                    &pi->graphics_voltage_change_enable,
0331                    sizeof(u8), pi->sram_end);
0332 
0333     return ret;
0334 }
0335 
0336 static int kv_set_dpm_interval(struct radeon_device *rdev)
0337 {
0338     struct kv_power_info *pi = kv_get_pi(rdev);
0339     int ret;
0340 
0341     pi->graphics_interval = 1;
0342 
0343     ret = kv_copy_bytes_to_smc(rdev,
0344                    pi->dpm_table_start +
0345                    offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
0346                    &pi->graphics_interval,
0347                    sizeof(u8), pi->sram_end);
0348 
0349     return ret;
0350 }
0351 
0352 static int kv_set_dpm_boot_state(struct radeon_device *rdev)
0353 {
0354     struct kv_power_info *pi = kv_get_pi(rdev);
0355     int ret;
0356 
0357     ret = kv_copy_bytes_to_smc(rdev,
0358                    pi->dpm_table_start +
0359                    offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
0360                    &pi->graphics_boot_level,
0361                    sizeof(u8), pi->sram_end);
0362 
0363     return ret;
0364 }
0365 
0366 static void kv_program_vc(struct radeon_device *rdev)
0367 {
0368     WREG32_SMC(CG_FTV_0, 0x3FFFC100);
0369 }
0370 
0371 static void kv_clear_vc(struct radeon_device *rdev)
0372 {
0373     WREG32_SMC(CG_FTV_0, 0);
0374 }
0375 
0376 static int kv_set_divider_value(struct radeon_device *rdev,
0377                 u32 index, u32 sclk)
0378 {
0379     struct kv_power_info *pi = kv_get_pi(rdev);
0380     struct atom_clock_dividers dividers;
0381     int ret;
0382 
0383     ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
0384                          sclk, false, &dividers);
0385     if (ret)
0386         return ret;
0387 
0388     pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
0389     pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
0390 
0391     return 0;
0392 }
0393 
0394 static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
0395                    struct sumo_vid_mapping_table *vid_mapping_table,
0396                    u32 vid_2bit)
0397 {
0398     struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
0399         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
0400     u32 i;
0401 
0402     if (vddc_sclk_table && vddc_sclk_table->count) {
0403         if (vid_2bit < vddc_sclk_table->count)
0404             return vddc_sclk_table->entries[vid_2bit].v;
0405         else
0406             return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
0407     } else {
0408         for (i = 0; i < vid_mapping_table->num_entries; i++) {
0409             if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
0410                 return vid_mapping_table->entries[i].vid_7bit;
0411         }
0412         return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
0413     }
0414 }
0415 
0416 static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
0417                    struct sumo_vid_mapping_table *vid_mapping_table,
0418                    u32 vid_7bit)
0419 {
0420     struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
0421         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
0422     u32 i;
0423 
0424     if (vddc_sclk_table && vddc_sclk_table->count) {
0425         for (i = 0; i < vddc_sclk_table->count; i++) {
0426             if (vddc_sclk_table->entries[i].v == vid_7bit)
0427                 return i;
0428         }
0429         return vddc_sclk_table->count - 1;
0430     } else {
0431         for (i = 0; i < vid_mapping_table->num_entries; i++) {
0432             if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
0433                 return vid_mapping_table->entries[i].vid_2bit;
0434         }
0435 
0436         return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
0437     }
0438 }
0439 
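     /* Linear VID decode: voltage = 6200 - 25 * vid.  This matches the SVI
      * encoding of 1.55 V minus 6.25 mV per step if the returned value is
      * in units of 0.25 mV, which is the assumption made in this comment.
      */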
0440 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
0441                         u16 voltage)
0442 {
0443     return 6200 - (voltage * 25);
0444 }
0445 
0446 static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
0447                         u32 vid_2bit)
0448 {
0449     struct kv_power_info *pi = kv_get_pi(rdev);
0450     u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
0451                            &pi->sys_info.vid_mapping_table,
0452                            vid_2bit);
0453 
0454     return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
0455 }
0456 
0457 
0458 static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
0459 {
0460     struct kv_power_info *pi = kv_get_pi(rdev);
0461 
0462     pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
0463     pi->graphics_level[index].MinVddNb =
0464         cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
0465 
0466     return 0;
0467 }
0468 
0469 static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
0470 {
0471     struct kv_power_info *pi = kv_get_pi(rdev);
0472 
0473     pi->graphics_level[index].AT = cpu_to_be16((u16)at);
0474 
0475     return 0;
0476 }
0477 
0478 static void kv_dpm_power_level_enable(struct radeon_device *rdev,
0479                       u32 index, bool enable)
0480 {
0481     struct kv_power_info *pi = kv_get_pi(rdev);
0482 
0483     pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
0484 }
0485 
0486 static void kv_start_dpm(struct radeon_device *rdev)
0487 {
0488     u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
0489 
0490     tmp |= GLOBAL_PWRMGT_EN;
0491     WREG32_SMC(GENERAL_PWRMGT, tmp);
0492 
0493     kv_smc_dpm_enable(rdev, true);
0494 }
0495 
0496 static void kv_stop_dpm(struct radeon_device *rdev)
0497 {
0498     kv_smc_dpm_enable(rdev, false);
0499 }
0500 
0501 static void kv_start_am(struct radeon_device *rdev)
0502 {
0503     u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
0504 
0505     sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
0506     sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
0507 
0508     WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
0509 }
0510 
0511 static void kv_reset_am(struct radeon_device *rdev)
0512 {
0513     u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
0514 
0515     sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
0516 
0517     WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
0518 }
0519 
0520 static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
0521 {
0522     return kv_notify_message_to_smu(rdev, freeze ?
0523                     PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
0524 }
0525 
0526 static int kv_force_lowest_valid(struct radeon_device *rdev)
0527 {
0528     return kv_force_dpm_lowest(rdev);
0529 }
0530 
0531 static int kv_unforce_levels(struct radeon_device *rdev)
0532 {
0533     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
0534         return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
0535     else
0536         return kv_set_enabled_levels(rdev);
0537 }
0538 
0539 static int kv_update_sclk_t(struct radeon_device *rdev)
0540 {
0541     struct kv_power_info *pi = kv_get_pi(rdev);
0542     u32 low_sclk_interrupt_t = 0;
0543     int ret = 0;
0544 
0545     if (pi->caps_sclk_throttle_low_notification) {
0546         low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
0547 
0548         ret = kv_copy_bytes_to_smc(rdev,
0549                        pi->dpm_table_start +
0550                        offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
0551                        (u8 *)&low_sclk_interrupt_t,
0552                        sizeof(u32), pi->sram_end);
0553     }
0554     return ret;
0555 }
0556 
0557 static int kv_program_bootup_state(struct radeon_device *rdev)
0558 {
0559     struct kv_power_info *pi = kv_get_pi(rdev);
0560     u32 i;
0561     struct radeon_clock_voltage_dependency_table *table =
0562         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
0563 
0564     if (table && table->count) {
0565         for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
0566             if (table->entries[i].clk == pi->boot_pl.sclk)
0567                 break;
0568         }
0569 
0570         pi->graphics_boot_level = (u8)i;
0571         kv_dpm_power_level_enable(rdev, i, true);
0572     } else {
0573         struct sumo_sclk_voltage_mapping_table *table =
0574             &pi->sys_info.sclk_voltage_mapping_table;
0575 
0576         if (table->num_max_dpm_entries == 0)
0577             return -EINVAL;
0578 
0579         for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
0580             if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
0581                 break;
0582         }
0583 
0584         pi->graphics_boot_level = (u8)i;
0585         kv_dpm_power_level_enable(rdev, i, true);
0586     }
0587     return 0;
0588 }
0589 
0590 static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
0591 {
0592     struct kv_power_info *pi = kv_get_pi(rdev);
0593     int ret;
0594 
0595     pi->graphics_therm_throttle_enable = 1;
0596 
0597     ret = kv_copy_bytes_to_smc(rdev,
0598                    pi->dpm_table_start +
0599                    offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
0600                    &pi->graphics_therm_throttle_enable,
0601                    sizeof(u8), pi->sram_end);
0602 
0603     return ret;
0604 }
0605 
0606 static int kv_upload_dpm_settings(struct radeon_device *rdev)
0607 {
0608     struct kv_power_info *pi = kv_get_pi(rdev);
0609     int ret;
0610 
0611     ret = kv_copy_bytes_to_smc(rdev,
0612                    pi->dpm_table_start +
0613                    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
0614                    (u8 *)&pi->graphics_level,
0615                    sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
0616                    pi->sram_end);
0617 
0618     if (ret)
0619         return ret;
0620 
0621     ret = kv_copy_bytes_to_smc(rdev,
0622                    pi->dpm_table_start +
0623                    offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
0624                    &pi->graphics_dpm_level_count,
0625                    sizeof(u8), pi->sram_end);
0626 
0627     return ret;
0628 }
0629 
0630 static u32 kv_get_clock_difference(u32 a, u32 b)
0631 {
0632     return (a >= b) ? a - b : b - a;
0633 }
0634 
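     /* Choose a DFS bypass divider code when the requested clock is within
      * 200 units of one of the listed frequencies (2 MHz, assuming the
      * usual 10 kHz clock units); 0 means no bypass.
      */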
0635 static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
0636 {
0637     struct kv_power_info *pi = kv_get_pi(rdev);
0638     u32 value;
0639 
0640     if (pi->caps_enable_dfs_bypass) {
0641         if (kv_get_clock_difference(clk, 40000) < 200)
0642             value = 3;
0643         else if (kv_get_clock_difference(clk, 30000) < 200)
0644             value = 2;
0645         else if (kv_get_clock_difference(clk, 20000) < 200)
0646             value = 7;
0647         else if (kv_get_clock_difference(clk, 15000) < 200)
0648             value = 6;
0649         else if (kv_get_clock_difference(clk, 10000) < 200)
0650             value = 8;
0651         else
0652             value = 0;
0653     } else {
0654         value = 0;
0655     }
0656 
0657     return value;
0658 }
0659 
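     /* Build the SMU UVD level table from the UVD clock/voltage dependency
      * table, stopping at entries above high_voltage_t, then upload the
      * level count, the sampling interval and the levels themselves to SMC
      * SRAM.
      */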
0660 static int kv_populate_uvd_table(struct radeon_device *rdev)
0661 {
0662     struct kv_power_info *pi = kv_get_pi(rdev);
0663     struct radeon_uvd_clock_voltage_dependency_table *table =
0664         &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
0665     struct atom_clock_dividers dividers;
0666     int ret;
0667     u32 i;
0668 
0669     if (table == NULL || table->count == 0)
0670         return 0;
0671 
0672     pi->uvd_level_count = 0;
0673     for (i = 0; i < table->count; i++) {
0674         if (pi->high_voltage_t &&
0675             (pi->high_voltage_t < table->entries[i].v))
0676             break;
0677 
0678         pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
0679         pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
0680         pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
0681 
0682         pi->uvd_level[i].VClkBypassCntl =
0683             (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
0684         pi->uvd_level[i].DClkBypassCntl =
0685             (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
0686 
0687         ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
0688                              table->entries[i].vclk, false, &dividers);
0689         if (ret)
0690             return ret;
0691         pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
0692 
0693         ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
0694                              table->entries[i].dclk, false, &dividers);
0695         if (ret)
0696             return ret;
0697         pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
0698 
0699         pi->uvd_level_count++;
0700     }
0701 
0702     ret = kv_copy_bytes_to_smc(rdev,
0703                    pi->dpm_table_start +
0704                    offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
0705                    (u8 *)&pi->uvd_level_count,
0706                    sizeof(u8), pi->sram_end);
0707     if (ret)
0708         return ret;
0709 
0710     pi->uvd_interval = 1;
0711 
0712     ret = kv_copy_bytes_to_smc(rdev,
0713                    pi->dpm_table_start +
0714                    offsetof(SMU7_Fusion_DpmTable, UVDInterval),
0715                    &pi->uvd_interval,
0716                    sizeof(u8), pi->sram_end);
0717     if (ret)
0718         return ret;
0719 
0720     ret = kv_copy_bytes_to_smc(rdev,
0721                    pi->dpm_table_start +
0722                    offsetof(SMU7_Fusion_DpmTable, UvdLevel),
0723                    (u8 *)&pi->uvd_level,
0724                    sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
0725                    pi->sram_end);
0726 
0727     return ret;
0728 
0729 }
0730 
0731 static int kv_populate_vce_table(struct radeon_device *rdev)
0732 {
0733     struct kv_power_info *pi = kv_get_pi(rdev);
0734     int ret;
0735     u32 i;
0736     struct radeon_vce_clock_voltage_dependency_table *table =
0737         &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
0738     struct atom_clock_dividers dividers;
0739 
0740     if (table == NULL || table->count == 0)
0741         return 0;
0742 
0743     pi->vce_level_count = 0;
0744     for (i = 0; i < table->count; i++) {
0745         if (pi->high_voltage_t &&
0746             pi->high_voltage_t < table->entries[i].v)
0747             break;
0748 
0749         pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
0750         pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
0751 
0752         pi->vce_level[i].ClkBypassCntl =
0753             (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
0754 
0755         ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
0756                              table->entries[i].evclk, false, &dividers);
0757         if (ret)
0758             return ret;
0759         pi->vce_level[i].Divider = (u8)dividers.post_div;
0760 
0761         pi->vce_level_count++;
0762     }
0763 
0764     ret = kv_copy_bytes_to_smc(rdev,
0765                    pi->dpm_table_start +
0766                    offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
0767                    (u8 *)&pi->vce_level_count,
0768                    sizeof(u8),
0769                    pi->sram_end);
0770     if (ret)
0771         return ret;
0772 
0773     pi->vce_interval = 1;
0774 
0775     ret = kv_copy_bytes_to_smc(rdev,
0776                    pi->dpm_table_start +
0777                    offsetof(SMU7_Fusion_DpmTable, VCEInterval),
0778                    (u8 *)&pi->vce_interval,
0779                    sizeof(u8),
0780                    pi->sram_end);
0781     if (ret)
0782         return ret;
0783 
0784     ret = kv_copy_bytes_to_smc(rdev,
0785                    pi->dpm_table_start +
0786                    offsetof(SMU7_Fusion_DpmTable, VceLevel),
0787                    (u8 *)&pi->vce_level,
0788                    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
0789                    pi->sram_end);
0790 
0791     return ret;
0792 }
0793 
0794 static int kv_populate_samu_table(struct radeon_device *rdev)
0795 {
0796     struct kv_power_info *pi = kv_get_pi(rdev);
0797     struct radeon_clock_voltage_dependency_table *table =
0798         &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
0799     struct atom_clock_dividers dividers;
0800     int ret;
0801     u32 i;
0802 
0803     if (table == NULL || table->count == 0)
0804         return 0;
0805 
0806     pi->samu_level_count = 0;
0807     for (i = 0; i < table->count; i++) {
0808         if (pi->high_voltage_t &&
0809             pi->high_voltage_t < table->entries[i].v)
0810             break;
0811 
0812         pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
0813         pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
0814 
0815         pi->samu_level[i].ClkBypassCntl =
0816             (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
0817 
0818         ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
0819                              table->entries[i].clk, false, &dividers);
0820         if (ret)
0821             return ret;
0822         pi->samu_level[i].Divider = (u8)dividers.post_div;
0823 
0824         pi->samu_level_count++;
0825     }
0826 
0827     ret = kv_copy_bytes_to_smc(rdev,
0828                    pi->dpm_table_start +
0829                    offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
0830                    (u8 *)&pi->samu_level_count,
0831                    sizeof(u8),
0832                    pi->sram_end);
0833     if (ret)
0834         return ret;
0835 
0836     pi->samu_interval = 1;
0837 
0838     ret = kv_copy_bytes_to_smc(rdev,
0839                    pi->dpm_table_start +
0840                    offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
0841                    (u8 *)&pi->samu_interval,
0842                    sizeof(u8),
0843                    pi->sram_end);
0844     if (ret)
0845         return ret;
0846 
0847     ret = kv_copy_bytes_to_smc(rdev,
0848                    pi->dpm_table_start +
0849                    offsetof(SMU7_Fusion_DpmTable, SamuLevel),
0850                    (u8 *)&pi->samu_level,
0851                    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
0852                    pi->sram_end);
0853     if (ret)
0854         return ret;
0855 
0856     return ret;
0857 }
0858 
0859 
0860 static int kv_populate_acp_table(struct radeon_device *rdev)
0861 {
0862     struct kv_power_info *pi = kv_get_pi(rdev);
0863     struct radeon_clock_voltage_dependency_table *table =
0864         &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
0865     struct atom_clock_dividers dividers;
0866     int ret;
0867     u32 i;
0868 
0869     if (table == NULL || table->count == 0)
0870         return 0;
0871 
0872     pi->acp_level_count = 0;
0873     for (i = 0; i < table->count; i++) {
0874         pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
0875         pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
0876 
0877         ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
0878                              table->entries[i].clk, false, &dividers);
0879         if (ret)
0880             return ret;
0881         pi->acp_level[i].Divider = (u8)dividers.post_div;
0882 
0883         pi->acp_level_count++;
0884     }
0885 
0886     ret = kv_copy_bytes_to_smc(rdev,
0887                    pi->dpm_table_start +
0888                    offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
0889                    (u8 *)&pi->acp_level_count,
0890                    sizeof(u8),
0891                    pi->sram_end);
0892     if (ret)
0893         return ret;
0894 
0895     pi->acp_interval = 1;
0896 
0897     ret = kv_copy_bytes_to_smc(rdev,
0898                    pi->dpm_table_start +
0899                    offsetof(SMU7_Fusion_DpmTable, ACPInterval),
0900                    (u8 *)&pi->acp_interval,
0901                    sizeof(u8),
0902                    pi->sram_end);
0903     if (ret)
0904         return ret;
0905 
0906     ret = kv_copy_bytes_to_smc(rdev,
0907                    pi->dpm_table_start +
0908                    offsetof(SMU7_Fusion_DpmTable, AcpLevel),
0909                    (u8 *)&pi->acp_level,
0910                    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
0911                    pi->sram_end);
0912     if (ret)
0913         return ret;
0914 
0915     return ret;
0916 }
0917 
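     /* Pick a ClkBypassCntl code for every graphics DPM level by matching
      * the level's sclk against a set of reference frequencies, using the
      * vddc/sclk dependency table when present and the sumo sclk/voltage
      * mapping table otherwise.
      */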
0918 static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
0919 {
0920     struct kv_power_info *pi = kv_get_pi(rdev);
0921     u32 i;
0922     struct radeon_clock_voltage_dependency_table *table =
0923         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
0924 
0925     if (table && table->count) {
0926         for (i = 0; i < pi->graphics_dpm_level_count; i++) {
0927             if (pi->caps_enable_dfs_bypass) {
0928                 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
0929                     pi->graphics_level[i].ClkBypassCntl = 3;
0930                 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
0931                     pi->graphics_level[i].ClkBypassCntl = 2;
0932                 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
0933                     pi->graphics_level[i].ClkBypassCntl = 7;
0934                 else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
0935                     pi->graphics_level[i].ClkBypassCntl = 6;
0936                 else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
0937                     pi->graphics_level[i].ClkBypassCntl = 8;
0938                 else
0939                     pi->graphics_level[i].ClkBypassCntl = 0;
0940             } else {
0941                 pi->graphics_level[i].ClkBypassCntl = 0;
0942             }
0943         }
0944     } else {
0945         struct sumo_sclk_voltage_mapping_table *table =
0946             &pi->sys_info.sclk_voltage_mapping_table;
0947         for (i = 0; i < pi->graphics_dpm_level_count; i++) {
0948             if (pi->caps_enable_dfs_bypass) {
0949                 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
0950                     pi->graphics_level[i].ClkBypassCntl = 3;
0951                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
0952                     pi->graphics_level[i].ClkBypassCntl = 2;
0953                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
0954                     pi->graphics_level[i].ClkBypassCntl = 7;
0955                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
0956                     pi->graphics_level[i].ClkBypassCntl = 6;
0957                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
0958                     pi->graphics_level[i].ClkBypassCntl = 8;
0959                 else
0960                     pi->graphics_level[i].ClkBypassCntl = 0;
0961             } else {
0962                 pi->graphics_level[i].ClkBypassCntl = 0;
0963             }
0964         }
0965     }
0966 }
0967 
0968 static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
0969 {
0970     return kv_notify_message_to_smu(rdev, enable ?
0971                     PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
0972 }
0973 
0974 static void kv_reset_acp_boot_level(struct radeon_device *rdev)
0975 {
0976     struct kv_power_info *pi = kv_get_pi(rdev);
0977 
0978     pi->acp_boot_level = 0xff;
0979 }
0980 
0981 static void kv_update_current_ps(struct radeon_device *rdev,
0982                  struct radeon_ps *rps)
0983 {
0984     struct kv_ps *new_ps = kv_get_ps(rps);
0985     struct kv_power_info *pi = kv_get_pi(rdev);
0986 
0987     pi->current_rps = *rps;
0988     pi->current_ps = *new_ps;
0989     pi->current_rps.ps_priv = &pi->current_ps;
0990 }
0991 
0992 static void kv_update_requested_ps(struct radeon_device *rdev,
0993                    struct radeon_ps *rps)
0994 {
0995     struct kv_ps *new_ps = kv_get_ps(rps);
0996     struct kv_power_info *pi = kv_get_pi(rdev);
0997 
0998     pi->requested_rps = *rps;
0999     pi->requested_ps = *new_ps;
1000     pi->requested_rps.ps_priv = &pi->requested_ps;
1001 }
1002 
1003 void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1004 {
1005     struct kv_power_info *pi = kv_get_pi(rdev);
1006     int ret;
1007 
1008     if (pi->bapm_enable) {
1009         ret = kv_smc_bapm_enable(rdev, enable);
1010         if (ret)
1011             DRM_ERROR("kv_smc_bapm_enable failed\n");
1012     }
1013 }
1014 
1015 static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
1016 {
1017     u32 thermal_int;
1018 
1019     thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
1020     if (enable)
1021         thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
1022     else
1023         thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
1024     WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
1025 
1026 }
1027 
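     /* Main dpm bring-up: parse the firmware header, program the bootup
      * level and the graphics/UVD/VCE/SAMU/ACP tables, then enable voltage
      * scaling, ULV, DPM, DiDt and CAC, and finally record the boot state
      * as the current power state.
      */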
1028 int kv_dpm_enable(struct radeon_device *rdev)
1029 {
1030     struct kv_power_info *pi = kv_get_pi(rdev);
1031     int ret;
1032 
1033     ret = kv_process_firmware_header(rdev);
1034     if (ret) {
1035         DRM_ERROR("kv_process_firmware_header failed\n");
1036         return ret;
1037     }
1038     kv_init_fps_limits(rdev);
1039     kv_init_graphics_levels(rdev);
1040     ret = kv_program_bootup_state(rdev);
1041     if (ret) {
1042         DRM_ERROR("kv_program_bootup_state failed\n");
1043         return ret;
1044     }
1045     kv_calculate_dfs_bypass_settings(rdev);
1046     ret = kv_upload_dpm_settings(rdev);
1047     if (ret) {
1048         DRM_ERROR("kv_upload_dpm_settings failed\n");
1049         return ret;
1050     }
1051     ret = kv_populate_uvd_table(rdev);
1052     if (ret) {
1053         DRM_ERROR("kv_populate_uvd_table failed\n");
1054         return ret;
1055     }
1056     ret = kv_populate_vce_table(rdev);
1057     if (ret) {
1058         DRM_ERROR("kv_populate_vce_table failed\n");
1059         return ret;
1060     }
1061     ret = kv_populate_samu_table(rdev);
1062     if (ret) {
1063         DRM_ERROR("kv_populate_samu_table failed\n");
1064         return ret;
1065     }
1066     ret = kv_populate_acp_table(rdev);
1067     if (ret) {
1068         DRM_ERROR("kv_populate_acp_table failed\n");
1069         return ret;
1070     }
1071     kv_program_vc(rdev);
1072 
1073     kv_start_am(rdev);
1074     if (pi->enable_auto_thermal_throttling) {
1075         ret = kv_enable_auto_thermal_throttling(rdev);
1076         if (ret) {
1077             DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1078             return ret;
1079         }
1080     }
1081     ret = kv_enable_dpm_voltage_scaling(rdev);
1082     if (ret) {
1083         DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1084         return ret;
1085     }
1086     ret = kv_set_dpm_interval(rdev);
1087     if (ret) {
1088         DRM_ERROR("kv_set_dpm_interval failed\n");
1089         return ret;
1090     }
1091     ret = kv_set_dpm_boot_state(rdev);
1092     if (ret) {
1093         DRM_ERROR("kv_set_dpm_boot_state failed\n");
1094         return ret;
1095     }
1096     ret = kv_enable_ulv(rdev, true);
1097     if (ret) {
1098         DRM_ERROR("kv_enable_ulv failed\n");
1099         return ret;
1100     }
1101     kv_start_dpm(rdev);
1102     ret = kv_enable_didt(rdev, true);
1103     if (ret) {
1104         DRM_ERROR("kv_enable_didt failed\n");
1105         return ret;
1106     }
1107     ret = kv_enable_smc_cac(rdev, true);
1108     if (ret) {
1109         DRM_ERROR("kv_enable_smc_cac failed\n");
1110         return ret;
1111     }
1112 
1113     kv_reset_acp_boot_level(rdev);
1114 
1115     ret = kv_smc_bapm_enable(rdev, false);
1116     if (ret) {
1117         DRM_ERROR("kv_smc_bapm_enable failed\n");
1118         return ret;
1119     }
1120 
1121     kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1122 
1123     return ret;
1124 }
1125 
1126 int kv_dpm_late_enable(struct radeon_device *rdev)
1127 {
1128     int ret = 0;
1129 
1130     if (rdev->irq.installed &&
1131         r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1132         ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1133         if (ret) {
1134             DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1135             return ret;
1136         }
1137         kv_enable_thermal_int(rdev, true);
1138     }
1139 
1140     /* powerdown unused blocks for now */
1141     kv_dpm_powergate_acp(rdev, true);
1142     kv_dpm_powergate_samu(rdev, true);
1143     kv_dpm_powergate_vce(rdev, true);
1144     kv_dpm_powergate_uvd(rdev, true);
1145 
1146     return ret;
1147 }
1148 
1149 void kv_dpm_disable(struct radeon_device *rdev)
1150 {
1151     kv_smc_bapm_enable(rdev, false);
1152 
1153     if (rdev->family == CHIP_MULLINS)
1154         kv_enable_nb_dpm(rdev, false);
1155 
1156     /* powerup blocks */
1157     kv_dpm_powergate_acp(rdev, false);
1158     kv_dpm_powergate_samu(rdev, false);
1159     kv_dpm_powergate_vce(rdev, false);
1160     kv_dpm_powergate_uvd(rdev, false);
1161 
1162     kv_enable_smc_cac(rdev, false);
1163     kv_enable_didt(rdev, false);
1164     kv_clear_vc(rdev);
1165     kv_stop_dpm(rdev);
1166     kv_enable_ulv(rdev, false);
1167     kv_reset_am(rdev);
1168     kv_enable_thermal_int(rdev, false);
1169 
1170     kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1171 }
1172 
1173 static void kv_init_sclk_t(struct radeon_device *rdev)
1174 {
1175     struct kv_power_info *pi = kv_get_pi(rdev);
1176 
1177     pi->low_sclk_interrupt_t = 0;
1178 }
1179 
1180 static int kv_init_fps_limits(struct radeon_device *rdev)
1181 {
1182     struct kv_power_info *pi = kv_get_pi(rdev);
1183     int ret = 0;
1184 
1185     if (pi->caps_fps) {
1186         u16 tmp;
1187 
1188         tmp = 45;
1189         pi->fps_high_t = cpu_to_be16(tmp);
1190         ret = kv_copy_bytes_to_smc(rdev,
1191                        pi->dpm_table_start +
1192                        offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1193                        (u8 *)&pi->fps_high_t,
1194                        sizeof(u16), pi->sram_end);
1195 
1196         tmp = 30;
1197         pi->fps_low_t = cpu_to_be16(tmp);
1198 
1199         ret = kv_copy_bytes_to_smc(rdev,
1200                        pi->dpm_table_start +
1201                        offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1202                        (u8 *)&pi->fps_low_t,
1203                        sizeof(u16), pi->sram_end);
1204 
1205     }
1206     return ret;
1207 }
1208 
1209 static void kv_init_powergate_state(struct radeon_device *rdev)
1210 {
1211     struct kv_power_info *pi = kv_get_pi(rdev);
1212 
1213     pi->uvd_power_gated = false;
1214     pi->vce_power_gated = false;
1215     pi->samu_power_gated = false;
1216     pi->acp_power_gated = false;
1217 
1218 }
1219 
1220 static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
1221 {
1222     return kv_notify_message_to_smu(rdev, enable ?
1223                     PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1224 }
1225 
1226 static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
1227 {
1228     return kv_notify_message_to_smu(rdev, enable ?
1229                     PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
1230 }
1231 
1232 static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
1233 {
1234     return kv_notify_message_to_smu(rdev, enable ?
1235                     PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1236 }
1237 
1238 static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
1239 {
1240     return kv_notify_message_to_smu(rdev, enable ?
1241                     PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1242 }
1243 
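     /* When ungating UVD, point the boot level at the highest dependency
      * table entry, upload it, and program the enabled-level mask (a single
      * level when UVD DPM is unsupported or stable p-state is requested,
      * all levels otherwise); then enable or disable UVD DPM to match the
      * gate.
      */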
1244 static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1245 {
1246     struct kv_power_info *pi = kv_get_pi(rdev);
1247     struct radeon_uvd_clock_voltage_dependency_table *table =
1248         &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1249     int ret;
1250     u32 mask;
1251 
1252     if (!gate) {
1253         if (table->count)
1254             pi->uvd_boot_level = table->count - 1;
1255         else
1256             pi->uvd_boot_level = 0;
1257 
1258         if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
1259             mask = 1 << pi->uvd_boot_level;
1260         } else {
1261             mask = 0x1f;
1262         }
1263 
1264         ret = kv_copy_bytes_to_smc(rdev,
1265                        pi->dpm_table_start +
1266                        offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
1267                        (uint8_t *)&pi->uvd_boot_level,
1268                        sizeof(u8), pi->sram_end);
1269         if (ret)
1270             return ret;
1271 
1272         kv_send_msg_to_smc_with_parameter(rdev,
1273                           PPSMC_MSG_UVDDPM_SetEnabledMask,
1274                           mask);
1275     }
1276 
1277     return kv_enable_uvd_dpm(rdev, !gate);
1278 }
1279 
1280 static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
1281 {
1282     u8 i;
1283     struct radeon_vce_clock_voltage_dependency_table *table =
1284         &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1285 
1286     for (i = 0; i < table->count; i++) {
1287         if (table->entries[i].evclk >= evclk)
1288             break;
1289     }
1290 
1291     return i;
1292 }
1293 
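     /* Power VCE up and enable VCE DPM when the new state starts requesting
      * evclk, and tear it back down when evclk returns to zero.  The boot
      * level follows the requested evclk unless stable p-state forces the
      * highest table entry.
      */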
1294 static int kv_update_vce_dpm(struct radeon_device *rdev,
1295                  struct radeon_ps *radeon_new_state,
1296                  struct radeon_ps *radeon_current_state)
1297 {
1298     struct kv_power_info *pi = kv_get_pi(rdev);
1299     struct radeon_vce_clock_voltage_dependency_table *table =
1300         &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1301     int ret;
1302 
1303     if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
1304         kv_dpm_powergate_vce(rdev, false);
1305         /* turn the clocks on when encoding */
1306         cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
1307         if (pi->caps_stable_p_state)
1308             pi->vce_boot_level = table->count - 1;
1309         else
1310             pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
1311 
1312         ret = kv_copy_bytes_to_smc(rdev,
1313                        pi->dpm_table_start +
1314                        offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
1315                        (u8 *)&pi->vce_boot_level,
1316                        sizeof(u8),
1317                        pi->sram_end);
1318         if (ret)
1319             return ret;
1320 
1321         if (pi->caps_stable_p_state)
1322             kv_send_msg_to_smc_with_parameter(rdev,
1323                               PPSMC_MSG_VCEDPM_SetEnabledMask,
1324                               (1 << pi->vce_boot_level));
1325 
1326         kv_enable_vce_dpm(rdev, true);
1327     } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
1328         kv_enable_vce_dpm(rdev, false);
1329         /* turn the clocks off when not encoding */
1330         cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
1331         kv_dpm_powergate_vce(rdev, true);
1332     }
1333 
1334     return 0;
1335 }
1336 
1337 static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1338 {
1339     struct kv_power_info *pi = kv_get_pi(rdev);
1340     struct radeon_clock_voltage_dependency_table *table =
1341         &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1342     int ret;
1343 
1344     if (!gate) {
1345         if (pi->caps_stable_p_state)
1346             pi->samu_boot_level = table->count - 1;
1347         else
1348             pi->samu_boot_level = 0;
1349 
1350         ret = kv_copy_bytes_to_smc(rdev,
1351                        pi->dpm_table_start +
1352                        offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
1353                        (u8 *)&pi->samu_boot_level,
1354                        sizeof(u8),
1355                        pi->sram_end);
1356         if (ret)
1357             return ret;
1358 
1359         if (pi->caps_stable_p_state)
1360             kv_send_msg_to_smc_with_parameter(rdev,
1361                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
1362                               (1 << pi->samu_boot_level));
1363     }
1364 
1365     return kv_enable_samu_dpm(rdev, !gate);
1366 }
1367 
1368 static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
1369 {
1370     u8 i;
1371     struct radeon_clock_voltage_dependency_table *table =
1372         &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1373 
1374     for (i = 0; i < table->count; i++) {
1375         if (table->entries[i].clk >= 0) /* XXX */
1376             break;
1377     }
1378 
1379     if (i >= table->count)
1380         i = table->count - 1;
1381 
1382     return i;
1383 }
1384 
1385 static void kv_update_acp_boot_level(struct radeon_device *rdev)
1386 {
1387     struct kv_power_info *pi = kv_get_pi(rdev);
1388     u8 acp_boot_level;
1389 
1390     if (!pi->caps_stable_p_state) {
1391         acp_boot_level = kv_get_acp_boot_level(rdev);
1392         if (acp_boot_level != pi->acp_boot_level) {
1393             pi->acp_boot_level = acp_boot_level;
1394             kv_send_msg_to_smc_with_parameter(rdev,
1395                               PPSMC_MSG_ACPDPM_SetEnabledMask,
1396                               (1 << pi->acp_boot_level));
1397         }
1398     }
1399 }
1400 
1401 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1402 {
1403     struct kv_power_info *pi = kv_get_pi(rdev);
1404     struct radeon_clock_voltage_dependency_table *table =
1405         &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1406     int ret;
1407 
1408     if (!gate) {
1409         if (pi->caps_stable_p_state)
1410             pi->acp_boot_level = table->count - 1;
1411         else
1412             pi->acp_boot_level = kv_get_acp_boot_level(rdev);
1413 
1414         ret = kv_copy_bytes_to_smc(rdev,
1415                        pi->dpm_table_start +
1416                        offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
1417                        (u8 *)&pi->acp_boot_level,
1418                        sizeof(u8),
1419                        pi->sram_end);
1420         if (ret)
1421             return ret;
1422 
1423         if (pi->caps_stable_p_state)
1424             kv_send_msg_to_smc_with_parameter(rdev,
1425                               PPSMC_MSG_ACPDPM_SetEnabledMask,
1426                               (1 << pi->acp_boot_level));
1427     }
1428 
1429     return kv_enable_acp_dpm(rdev, !gate);
1430 }
1431 
1432 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
1433 {
1434     struct kv_power_info *pi = kv_get_pi(rdev);
1435 
1436     if (pi->uvd_power_gated == gate)
1437         return;
1438 
1439     pi->uvd_power_gated = gate;
1440 
1441     if (gate) {
1442         if (pi->caps_uvd_pg) {
1443             uvd_v1_0_stop(rdev);
1444             cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
1445         }
1446         kv_update_uvd_dpm(rdev, gate);
1447         if (pi->caps_uvd_pg)
1448             kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
1449     } else {
1450         if (pi->caps_uvd_pg) {
1451             kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
1452             uvd_v4_2_resume(rdev);
1453             uvd_v1_0_start(rdev);
1454             cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
1455         }
1456         kv_update_uvd_dpm(rdev, gate);
1457     }
1458 }
1459 
1460 static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
1461 {
1462     struct kv_power_info *pi = kv_get_pi(rdev);
1463 
1464     if (pi->vce_power_gated == gate)
1465         return;
1466 
1467     pi->vce_power_gated = gate;
1468 
1469     if (gate) {
1470         if (pi->caps_vce_pg) {
1471             /* XXX do we need a vce_v1_0_stop() ?  */
1472             kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
1473         }
1474     } else {
1475         if (pi->caps_vce_pg) {
1476             kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
1477             vce_v2_0_resume(rdev);
1478             vce_v1_0_start(rdev);
1479         }
1480     }
1481 }
1482 
1483 static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
1484 {
1485     struct kv_power_info *pi = kv_get_pi(rdev);
1486 
1487     if (pi->samu_power_gated == gate)
1488         return;
1489 
1490     pi->samu_power_gated = gate;
1491 
1492     if (gate) {
1493         kv_update_samu_dpm(rdev, true);
1494         if (pi->caps_samu_pg)
1495             kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
1496     } else {
1497         if (pi->caps_samu_pg)
1498             kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
1499         kv_update_samu_dpm(rdev, false);
1500     }
1501 }
1502 
1503 static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
1504 {
1505     struct kv_power_info *pi = kv_get_pi(rdev);
1506 
1507     if (pi->acp_power_gated == gate)
1508         return;
1509 
1510     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1511         return;
1512 
1513     pi->acp_power_gated = gate;
1514 
1515     if (gate) {
1516         kv_update_acp_dpm(rdev, true);
1517         if (pi->caps_acp_pg)
1518             kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
1519     } else {
1520         if (pi->caps_acp_pg)
1521             kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
1522         kv_update_acp_dpm(rdev, false);
1523     }
1524 }
1525 
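     /* Compute lowest_valid/highest_valid as the graphics DPM levels that
      * bracket the new state's sclk range; if the two end up inverted,
      * collapse the range to the single level closest to the requested
      * clocks.
      */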
1526 static void kv_set_valid_clock_range(struct radeon_device *rdev,
1527                      struct radeon_ps *new_rps)
1528 {
1529     struct kv_ps *new_ps = kv_get_ps(new_rps);
1530     struct kv_power_info *pi = kv_get_pi(rdev);
1531     u32 i;
1532     struct radeon_clock_voltage_dependency_table *table =
1533         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1534 
1535     if (table && table->count) {
1536         for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1537             if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
1538                 (i == (pi->graphics_dpm_level_count - 1))) {
1539                 pi->lowest_valid = i;
1540                 break;
1541             }
1542         }
1543 
1544         for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1545             if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
1546                 break;
1547         }
1548         pi->highest_valid = i;
1549 
1550         if (pi->lowest_valid > pi->highest_valid) {
1551             if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
1552                 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
1553                 pi->highest_valid = pi->lowest_valid;
1554             else
1555                 pi->lowest_valid = pi->highest_valid;
1556         }
1557     } else {
1558         struct sumo_sclk_voltage_mapping_table *table =
1559             &pi->sys_info.sclk_voltage_mapping_table;
1560 
1561         for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
1562             if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
1563                 i == (int)(pi->graphics_dpm_level_count - 1)) {
1564                 pi->lowest_valid = i;
1565                 break;
1566             }
1567         }
1568 
1569         for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1570             if (table->entries[i].sclk_frequency <=
1571                 new_ps->levels[new_ps->num_levels - 1].sclk)
1572                 break;
1573         }
1574         pi->highest_valid = i;
1575 
1576         if (pi->lowest_valid > pi->highest_valid) {
1577             if ((new_ps->levels[0].sclk -
1578                  table->entries[pi->highest_valid].sclk_frequency) >
1579                 (table->entries[pi->lowest_valid].sclk_frequency -
1580              new_ps->levels[new_ps->num_levels - 1].sclk))
1581                 pi->highest_valid = pi->lowest_valid;
1582             else
1583                 pi->lowest_valid = pi->highest_valid;
1584         }
1585     }
1586 }
1587 
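/*
 * Update the DFS bypass control byte of the boot graphics level in the
 * SMU's copy of the DPM table, depending on whether the new state needs
 * DFS bypass.
 */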
1588 static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1589                      struct radeon_ps *new_rps)
1590 {
1591     struct kv_ps *new_ps = kv_get_ps(new_rps);
1592     struct kv_power_info *pi = kv_get_pi(rdev);
1593     int ret = 0;
1594     u8 clk_bypass_cntl;
1595 
1596     if (pi->caps_enable_dfs_bypass) {
1597         clk_bypass_cntl = new_ps->need_dfs_bypass ?
1598             pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1599         ret = kv_copy_bytes_to_smc(rdev,
1600                        (pi->dpm_table_start +
1601                         offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1602                         (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1603                         offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1604                        &clk_bypass_cntl,
1605                        sizeof(u8), pi->sram_end);
1606     }
1607 
1608     return ret;
1609 }
1610 
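/*
 * Enable or disable north-bridge DPM through the SMU, tracking the result
 * in pi->nb_dpm_enabled so a message is only sent on real transitions.
 */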
1611 static int kv_enable_nb_dpm(struct radeon_device *rdev,
1612                 bool enable)
1613 {
1614     struct kv_power_info *pi = kv_get_pi(rdev);
1615     int ret = 0;
1616 
1617     if (enable) {
1618         if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1619             ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1620             if (ret == 0)
1621                 pi->nb_dpm_enabled = true;
1622         }
1623     } else {
1624         if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
1625             ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
1626             if (ret == 0)
1627                 pi->nb_dpm_enabled = false;
1628         }
1629     }
1630 
1631     return ret;
1632 }
1633 
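/*
 * Forced-performance-level hook: pin DPM to the highest or lowest enabled
 * sclk level, or hand level selection back to the SMU (auto).
 */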
1634 int kv_dpm_force_performance_level(struct radeon_device *rdev,
1635                    enum radeon_dpm_forced_level level)
1636 {
1637     int ret;
1638 
1639     if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1640         ret = kv_force_dpm_highest(rdev);
1641         if (ret)
1642             return ret;
1643     } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1644         ret = kv_force_dpm_lowest(rdev);
1645         if (ret)
1646             return ret;
1647     } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1648         ret = kv_unforce_levels(rdev);
1649         if (ret)
1650             return ret;
1651     }
1652 
1653     rdev->pm.dpm.forced_level = level;
1654 
1655     return 0;
1656 }
1657 
1658 int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
1659 {
1660     struct kv_power_info *pi = kv_get_pi(rdev);
1661     struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1662     struct radeon_ps *new_ps = &requested_ps;
1663 
1664     kv_update_requested_ps(rdev, new_ps);
1665 
1666     kv_apply_state_adjust_rules(rdev,
1667                     &pi->requested_rps,
1668                     &pi->current_rps);
1669 
1670     return 0;
1671 }
1672 
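/*
 * Program the hardware for the requested power state.  After refreshing
 * the BAPM AC/DC state (when enabled), Kabini/Mullins force the lowest
 * valid level around the DPM table upload, while other parts freeze sclk
 * DPM for the upload instead.  Both paths then update VCE DPM and sclk_t;
 * NB DPM is re-enabled on Mullins and on the non-Kabini/Mullins path.
 */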
1673 int kv_dpm_set_power_state(struct radeon_device *rdev)
1674 {
1675     struct kv_power_info *pi = kv_get_pi(rdev);
1676     struct radeon_ps *new_ps = &pi->requested_rps;
1677     struct radeon_ps *old_ps = &pi->current_rps;
1678     int ret;
1679 
1680     if (pi->bapm_enable) {
1681         ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1682         if (ret) {
1683             DRM_ERROR("kv_smc_bapm_enable failed\n");
1684             return ret;
1685         }
1686     }
1687 
1688     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
1689         if (pi->enable_dpm) {
1690             kv_set_valid_clock_range(rdev, new_ps);
1691             kv_update_dfs_bypass_settings(rdev, new_ps);
1692             ret = kv_calculate_ds_divider(rdev);
1693             if (ret) {
1694                 DRM_ERROR("kv_calculate_ds_divider failed\n");
1695                 return ret;
1696             }
1697             kv_calculate_nbps_level_settings(rdev);
1698             kv_calculate_dpm_settings(rdev);
1699             kv_force_lowest_valid(rdev);
1700             kv_enable_new_levels(rdev);
1701             kv_upload_dpm_settings(rdev);
1702             kv_program_nbps_index_settings(rdev, new_ps);
1703             kv_unforce_levels(rdev);
1704             kv_set_enabled_levels(rdev);
1705             kv_force_lowest_valid(rdev);
1706             kv_unforce_levels(rdev);
1707 
1708             ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1709             if (ret) {
1710                 DRM_ERROR("kv_update_vce_dpm failed\n");
1711                 return ret;
1712             }
1713             kv_update_sclk_t(rdev);
1714             if (rdev->family == CHIP_MULLINS)
1715                 kv_enable_nb_dpm(rdev, true);
1716         }
1717     } else {
1718         if (pi->enable_dpm) {
1719             kv_set_valid_clock_range(rdev, new_ps);
1720             kv_update_dfs_bypass_settings(rdev, new_ps);
1721             ret = kv_calculate_ds_divider(rdev);
1722             if (ret) {
1723                 DRM_ERROR("kv_calculate_ds_divider failed\n");
1724                 return ret;
1725             }
1726             kv_calculate_nbps_level_settings(rdev);
1727             kv_calculate_dpm_settings(rdev);
1728             kv_freeze_sclk_dpm(rdev, true);
1729             kv_upload_dpm_settings(rdev);
1730             kv_program_nbps_index_settings(rdev, new_ps);
1731             kv_freeze_sclk_dpm(rdev, false);
1732             kv_set_enabled_levels(rdev);
1733             ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1734             if (ret) {
1735                 DRM_ERROR("kv_update_vce_dpm failed\n");
1736                 return ret;
1737             }
1738             kv_update_acp_boot_level(rdev);
1739             kv_update_sclk_t(rdev);
1740             kv_enable_nb_dpm(rdev, true);
1741         }
1742     }
1743 
1744     return 0;
1745 }
1746 
1747 void kv_dpm_post_set_power_state(struct radeon_device *rdev)
1748 {
1749     struct kv_power_info *pi = kv_get_pi(rdev);
1750     struct radeon_ps *new_ps = &pi->requested_rps;
1751 
1752     kv_update_current_ps(rdev, new_ps);
1753 }
1754 
1755 void kv_dpm_setup_asic(struct radeon_device *rdev)
1756 {
1757     sumo_take_smu_control(rdev, true);
1758     kv_init_powergate_state(rdev);
1759     kv_init_sclk_t(rdev);
1760 }
1761 
1762 //XXX use sumo_dpm_display_configuration_changed
1763 
1764 static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1765                         struct radeon_clock_and_voltage_limits *table)
1766 {
1767     struct kv_power_info *pi = kv_get_pi(rdev);
1768 
1769     if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1770         int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1771         table->sclk =
1772             pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1773         table->vddc =
1774             kv_convert_2bit_index_to_voltage(rdev,
1775                              pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1776     }
1777 
1778     table->mclk = pi->sys_info.nbp_memory_clock[0];
1779 }
1780 
1781 static void kv_patch_voltage_values(struct radeon_device *rdev)
1782 {
1783     int i;
1784     struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
1785         &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1786     struct radeon_vce_clock_voltage_dependency_table *vce_table =
1787         &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1788     struct radeon_clock_voltage_dependency_table *samu_table =
1789         &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1790     struct radeon_clock_voltage_dependency_table *acp_table =
1791         &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1792 
1793     if (uvd_table->count) {
1794         for (i = 0; i < uvd_table->count; i++)
1795             uvd_table->entries[i].v =
1796                 kv_convert_8bit_index_to_voltage(rdev,
1797                                  uvd_table->entries[i].v);
1798     }
1799 
1800     if (vce_table->count) {
1801         for (i = 0; i < vce_table->count; i++)
1802             vce_table->entries[i].v =
1803                 kv_convert_8bit_index_to_voltage(rdev,
1804                                  vce_table->entries[i].v);
1805     }
1806 
1807     if (samu_table->count) {
1808         for (i = 0; i < samu_table->count; i++)
1809             samu_table->entries[i].v =
1810                 kv_convert_8bit_index_to_voltage(rdev,
1811                                  samu_table->entries[i].v);
1812     }
1813 
1814     if (acp_table->count) {
1815         for (i = 0; i < acp_table->count; i++)
1816             acp_table->entries[i].v =
1817                 kv_convert_8bit_index_to_voltage(rdev,
1818                                  acp_table->entries[i].v);
1819     }
1820 
1821 }
1822 
1823 static void kv_construct_boot_state(struct radeon_device *rdev)
1824 {
1825     struct kv_power_info *pi = kv_get_pi(rdev);
1826 
1827     pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1828     pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1829     pi->boot_pl.ds_divider_index = 0;
1830     pi->boot_pl.ss_divider_index = 0;
1831     pi->boot_pl.allow_gnb_slow = 1;
1832     pi->boot_pl.force_nbp_state = 0;
1833     pi->boot_pl.display_wm = 0;
1834     pi->boot_pl.vce_wm = 0;
1835 }
1836 
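/*
 * Force the highest (and, below, the lowest) currently enabled graphics
 * level: Kabini/Mullins use the DPM_ForceState SMU message, other parts
 * shrink the enabled-level mask down to that single level.
 */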
1837 static int kv_force_dpm_highest(struct radeon_device *rdev)
1838 {
1839     int ret;
1840     u32 enable_mask, i;
1841 
1842     ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1843     if (ret)
1844         return ret;
1845 
1846     for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
1847         if (enable_mask & (1 << i))
1848             break;
1849     }
1850 
1851     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1852         return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1853     else
1854         return kv_set_enabled_level(rdev, i);
1855 }
1856 
1857 static int kv_force_dpm_lowest(struct radeon_device *rdev)
1858 {
1859     int ret;
1860     u32 enable_mask, i;
1861 
1862     ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1863     if (ret)
1864         return ret;
1865 
1866     for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
1867         if (enable_mask & (1 << i))
1868             break;
1869     }
1870 
1871     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1872         return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1873     else
1874         return kv_set_enabled_level(rdev, i);
1875 }
1876 
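/*
 * Pick the largest deep-sleep divider ID that keeps the divided sclk at or
 * above the requested minimum (never below KV_MINIMUM_ENGINE_CLOCK).
 * Returns 0 (no deep sleep) when the feature is disabled, the clock is
 * already below the minimum, or no divider qualifies.
 */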
1877 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1878                          u32 sclk, u32 min_sclk_in_sr)
1879 {
1880     struct kv_power_info *pi = kv_get_pi(rdev);
1881     u32 i;
1882     u32 temp;
1883     u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
1884         min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
1885 
1886     if (sclk < min)
1887         return 0;
1888 
1889     if (!pi->caps_sclk_ds)
1890         return 0;
1891 
1892     for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
1893         temp = sclk / sumo_get_sleep_divider_from_id(i);
1894         if (temp >= min)
1895             break;
1896     }
1897 
1898     return (u8)i;
1899 }
1900 
1901 static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
1902 {
1903     struct kv_power_info *pi = kv_get_pi(rdev);
1904     struct radeon_clock_voltage_dependency_table *table =
1905         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1906     int i;
1907 
1908     if (table && table->count) {
1909         for (i = table->count - 1; i >= 0; i--) {
1910             if (pi->high_voltage_t &&
1911                 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
1912                  pi->high_voltage_t)) {
1913                 *limit = i;
1914                 return 0;
1915             }
1916         }
1917     } else {
1918         struct sumo_sclk_voltage_mapping_table *table =
1919             &pi->sys_info.sclk_voltage_mapping_table;
1920 
1921         for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
1922             if (pi->high_voltage_t &&
1923                 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
1924                  pi->high_voltage_t)) {
1925                 *limit = i;
1926                 return 0;
1927             }
1928         }
1929     }
1930 
1931     *limit = 0;
1932     return 0;
1933 }
1934 
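/*
 * Massage the requested state before programming it: pull in the VCE
 * clocks for VCE-active states, enforce a minimum sclk (derived from 75%
 * of the AC sclk limit when a stable P-state is requested), clamp levels
 * that would exceed the high-voltage threshold, and choose the NB P-state
 * hints based on family, display count, battery state and video playback.
 */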
1935 static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
1936                     struct radeon_ps *new_rps,
1937                     struct radeon_ps *old_rps)
1938 {
1939     struct kv_ps *ps = kv_get_ps(new_rps);
1940     struct kv_power_info *pi = kv_get_pi(rdev);
1941     u32 min_sclk = 10000; /* ??? */
1942     u32 sclk, mclk = 0;
1943     int i, limit;
1944     bool force_high;
1945     struct radeon_clock_voltage_dependency_table *table =
1946         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1947     u32 stable_p_state_sclk = 0;
1948     struct radeon_clock_and_voltage_limits *max_limits =
1949         &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1950 
1951     if (new_rps->vce_active) {
1952         new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
1953         new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
1954     } else {
1955         new_rps->evclk = 0;
1956         new_rps->ecclk = 0;
1957     }
1958 
1959     mclk = max_limits->mclk;
1960     sclk = min_sclk;
1961 
1962     if (pi->caps_stable_p_state) {
1963         stable_p_state_sclk = (max_limits->sclk * 75) / 100;
1964 
1965         for (i = table->count - 1; i >= 0; i--) {
1966             if (stable_p_state_sclk >= table->entries[i].clk) {
1967                 stable_p_state_sclk = table->entries[i].clk;
1968                 break;
1969             }
1970         }
1971 
1972         if (i > 0)
1973             stable_p_state_sclk = table->entries[0].clk;
1974 
1975         sclk = stable_p_state_sclk;
1976     }
1977 
1978     if (new_rps->vce_active) {
1979         if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
1980             sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
1981     }
1982 
1983     ps->need_dfs_bypass = true;
1984 
1985     for (i = 0; i < ps->num_levels; i++) {
1986         if (ps->levels[i].sclk < sclk)
1987             ps->levels[i].sclk = sclk;
1988     }
1989 
1990     if (table && table->count) {
1991         for (i = 0; i < ps->num_levels; i++) {
1992             if (pi->high_voltage_t &&
1993                 (pi->high_voltage_t <
1994                  kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
1995                 kv_get_high_voltage_limit(rdev, &limit);
1996                 ps->levels[i].sclk = table->entries[limit].clk;
1997             }
1998         }
1999     } else {
2000         struct sumo_sclk_voltage_mapping_table *table =
2001             &pi->sys_info.sclk_voltage_mapping_table;
2002 
2003         for (i = 0; i < ps->num_levels; i++) {
2004             if (pi->high_voltage_t &&
2005                 (pi->high_voltage_t <
2006                  kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2007                 kv_get_high_voltage_limit(rdev, &limit);
2008                 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2009             }
2010         }
2011     }
2012 
2013     if (pi->caps_stable_p_state) {
2014         for (i = 0; i < ps->num_levels; i++) {
2015             ps->levels[i].sclk = stable_p_state_sclk;
2016         }
2017     }
2018 
2019     pi->video_start = new_rps->dclk || new_rps->vclk ||
2020         new_rps->evclk || new_rps->ecclk;
2021 
2022     if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2023         ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2024         pi->battery_state = true;
2025     else
2026         pi->battery_state = false;
2027 
2028     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2029         ps->dpm0_pg_nb_ps_lo = 0x1;
2030         ps->dpm0_pg_nb_ps_hi = 0x0;
2031         ps->dpmx_nb_ps_lo = 0x1;
2032         ps->dpmx_nb_ps_hi = 0x0;
2033     } else {
2034         ps->dpm0_pg_nb_ps_lo = 0x3;
2035         ps->dpm0_pg_nb_ps_hi = 0x0;
2036         ps->dpmx_nb_ps_lo = 0x3;
2037         ps->dpmx_nb_ps_hi = 0x0;
2038 
2039         if (pi->sys_info.nb_dpm_enable) {
2040             force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2041                 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2042                 pi->disable_nb_ps3_in_battery;
2043             ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2044             ps->dpm0_pg_nb_ps_hi = 0x2;
2045             ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2046             ps->dpmx_nb_ps_hi = 0x2;
2047         }
2048     }
2049 }
2050 
2051 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2052                             u32 index, bool enable)
2053 {
2054     struct kv_power_info *pi = kv_get_pi(rdev);
2055 
2056     pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2057 }
2058 
2059 static int kv_calculate_ds_divider(struct radeon_device *rdev)
2060 {
2061     struct kv_power_info *pi = kv_get_pi(rdev);
2062     u32 sclk_in_sr = 10000; /* ??? */
2063     u32 i;
2064 
2065     if (pi->lowest_valid > pi->highest_valid)
2066         return -EINVAL;
2067 
2068     for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2069         pi->graphics_level[i].DeepSleepDivId =
2070             kv_get_sleep_divider_id_from_clock(rdev,
2071                                be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2072                                sclk_in_sr);
2073     }
2074     return 0;
2075 }
2076 
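/*
 * Per-level north-bridge hints.  Every valid level defaults to allowing a
 * slow GNB; on Kabini/Mullins, high memclk demand, three or more active
 * displays or video playback forces the GNB fast on all levels, otherwise
 * levels 1-4 are kept fast and NB P-state 1 is forced for level 0 on
 * battery.  Other parts only adjust the lowest valid level when NB DPM is
 * enabled on battery power.
 */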
2077 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2078 {
2079     struct kv_power_info *pi = kv_get_pi(rdev);
2080     u32 i;
2081     bool force_high;
2082     struct radeon_clock_and_voltage_limits *max_limits =
2083         &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2084     u32 mclk = max_limits->mclk;
2085 
2086     if (pi->lowest_valid > pi->highest_valid)
2087         return -EINVAL;
2088 
2089     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2090         for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2091             pi->graphics_level[i].GnbSlow = 1;
2092             pi->graphics_level[i].ForceNbPs1 = 0;
2093             pi->graphics_level[i].UpH = 0;
2094         }
2095 
2096         if (!pi->sys_info.nb_dpm_enable)
2097             return 0;
2098 
2099         force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2100                   (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2101 
2102         if (force_high) {
2103             for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2104                 pi->graphics_level[i].GnbSlow = 0;
2105         } else {
2106             if (pi->battery_state)
2107                 pi->graphics_level[0].ForceNbPs1 = 1;
2108 
2109             pi->graphics_level[1].GnbSlow = 0;
2110             pi->graphics_level[2].GnbSlow = 0;
2111             pi->graphics_level[3].GnbSlow = 0;
2112             pi->graphics_level[4].GnbSlow = 0;
2113         }
2114     } else {
2115         for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2116             pi->graphics_level[i].GnbSlow = 1;
2117             pi->graphics_level[i].ForceNbPs1 = 0;
2118             pi->graphics_level[i].UpH = 0;
2119         }
2120 
2121         if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2122             pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2123             pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2124             if (pi->lowest_valid != pi->highest_valid)
2125                 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2126         }
2127     }
2128     return 0;
2129 }
2130 
2131 static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2132 {
2133     struct kv_power_info *pi = kv_get_pi(rdev);
2134     u32 i;
2135 
2136     if (pi->lowest_valid > pi->highest_valid)
2137         return -EINVAL;
2138 
2139     for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2140         pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2141 
2142     return 0;
2143 }
2144 
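/*
 * Build the graphics DPM levels from the vddc/sclk dependency table (or
 * the sclk voltage mapping table when none is present), stopping early if
 * an entry would exceed the high-voltage threshold.  All hardware levels
 * start out disabled; kv_enable_new_levels() re-enables the valid window
 * later.
 */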
2145 static void kv_init_graphics_levels(struct radeon_device *rdev)
2146 {
2147     struct kv_power_info *pi = kv_get_pi(rdev);
2148     u32 i;
2149     struct radeon_clock_voltage_dependency_table *table =
2150         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2151 
2152     if (table && table->count) {
2153         u32 vid_2bit;
2154 
2155         pi->graphics_dpm_level_count = 0;
2156         for (i = 0; i < table->count; i++) {
2157             if (pi->high_voltage_t &&
2158                 (pi->high_voltage_t <
2159                  kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2160                 break;
2161 
2162             kv_set_divider_value(rdev, i, table->entries[i].clk);
2163             vid_2bit = kv_convert_vid7_to_vid2(rdev,
2164                                &pi->sys_info.vid_mapping_table,
2165                                table->entries[i].v);
2166             kv_set_vid(rdev, i, vid_2bit);
2167             kv_set_at(rdev, i, pi->at[i]);
2168             kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2169             pi->graphics_dpm_level_count++;
2170         }
2171     } else {
2172         struct sumo_sclk_voltage_mapping_table *table =
2173             &pi->sys_info.sclk_voltage_mapping_table;
2174 
2175         pi->graphics_dpm_level_count = 0;
2176         for (i = 0; i < table->num_max_dpm_entries; i++) {
2177             if (pi->high_voltage_t &&
2178                 pi->high_voltage_t <
2179                 kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2180                 break;
2181 
2182             kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2183             kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2184             kv_set_at(rdev, i, pi->at[i]);
2185             kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2186             pi->graphics_dpm_level_count++;
2187         }
2188     }
2189 
2190     for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2191         kv_dpm_power_level_enable(rdev, i, false);
2192 }
2193 
2194 static void kv_enable_new_levels(struct radeon_device *rdev)
2195 {
2196     struct kv_power_info *pi = kv_get_pi(rdev);
2197     u32 i;
2198 
2199     for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2200         if (i >= pi->lowest_valid && i <= pi->highest_valid)
2201             kv_dpm_power_level_enable(rdev, i, true);
2202     }
2203 }
2204 
2205 static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
2206 {
2207     u32 new_mask = (1 << level);
2208 
2209     return kv_send_msg_to_smc_with_parameter(rdev,
2210                          PPSMC_MSG_SCLKDPM_SetEnabledMask,
2211                          new_mask);
2212 }
2213 
2214 static int kv_set_enabled_levels(struct radeon_device *rdev)
2215 {
2216     struct kv_power_info *pi = kv_get_pi(rdev);
2217     u32 i, new_mask = 0;
2218 
2219     for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2220         new_mask |= (1 << i);
2221 
2222     return kv_send_msg_to_smc_with_parameter(rdev,
2223                          PPSMC_MSG_SCLKDPM_SetEnabledMask,
2224                          new_mask);
2225 }
2226 
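/*
 * Propagate the NB P-state hints chosen in kv_apply_state_adjust_rules()
 * into NB_DPM_CONFIG_1.  Kabini and Mullins skip this step entirely.
 */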
2227 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2228                        struct radeon_ps *new_rps)
2229 {
2230     struct kv_ps *new_ps = kv_get_ps(new_rps);
2231     struct kv_power_info *pi = kv_get_pi(rdev);
2232     u32 nbdpmconfig1;
2233 
2234     if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2235         return;
2236 
2237     if (pi->sys_info.nb_dpm_enable) {
2238         nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2239         nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2240                   DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2241         nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2242                  Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2243                  DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2244                  DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2245         WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2246     }
2247 }
2248 
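/*
 * Clamp the requested thermal range to 0..255 C and program the DIG_THERM
 * interrupt thresholds (the register fields take the temperature in
 * degrees plus the fixed offset of 49 encoded below).
 */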
2249 static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2250                         int min_temp, int max_temp)
2251 {
2252     int low_temp = 0 * 1000;
2253     int high_temp = 255 * 1000;
2254     u32 tmp;
2255 
2256     if (low_temp < min_temp)
2257         low_temp = min_temp;
2258     if (high_temp > max_temp)
2259         high_temp = max_temp;
2260     if (high_temp < low_temp) {
2261         DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2262         return -EINVAL;
2263     }
2264 
2265     tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2266     tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2267     tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2268         DIG_THERM_INTL(49 + (low_temp / 1000)));
2269     WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2270 
2271     rdev->pm.dpm.thermal.min_temp = low_temp;
2272     rdev->pm.dpm.thermal.max_temp = high_temp;
2273 
2274     return 0;
2275 }
2276 
2277 union igp_info {
2278     struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2279     struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2280     struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2281     struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2282     struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2283     struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2284 };
2285 
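/*
 * Pull platform data from the ATOM IntegratedSystemInfo table (only the
 * v1.8 layout is accepted): boot clocks, thermal limits, NB P-state
 * clocks, NB DPM and DFS-bypass capability, plus the sclk/voltage mapping
 * tables used to build the DPM levels.
 */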
2286 static int kv_parse_sys_info_table(struct radeon_device *rdev)
2287 {
2288     struct kv_power_info *pi = kv_get_pi(rdev);
2289     struct radeon_mode_info *mode_info = &rdev->mode_info;
2290     int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2291     union igp_info *igp_info;
2292     u8 frev, crev;
2293     u16 data_offset;
2294     int i;
2295 
2296     if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2297                    &frev, &crev, &data_offset)) {
2298         igp_info = (union igp_info *)(mode_info->atom_context->bios +
2299                           data_offset);
2300 
2301         if (crev != 8) {
2302             DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2303             return -EINVAL;
2304         }
2305         pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2306         pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2307         pi->sys_info.bootup_nb_voltage_index =
2308             le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2309         if (igp_info->info_8.ucHtcTmpLmt == 0)
2310             pi->sys_info.htc_tmp_lmt = 203;
2311         else
2312             pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2313         if (igp_info->info_8.ucHtcHystLmt == 0)
2314             pi->sys_info.htc_hyst_lmt = 5;
2315         else
2316             pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2317         if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2318             DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2319         }
2320 
2321         if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2322             pi->sys_info.nb_dpm_enable = true;
2323         else
2324             pi->sys_info.nb_dpm_enable = false;
2325 
2326         for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2327             pi->sys_info.nbp_memory_clock[i] =
2328                 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2329             pi->sys_info.nbp_n_clock[i] =
2330                 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2331         }
2332         if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2333             SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2334             pi->caps_enable_dfs_bypass = true;
2335 
2336         sumo_construct_sclk_voltage_mapping_table(rdev,
2337                               &pi->sys_info.sclk_voltage_mapping_table,
2338                               igp_info->info_8.sAvail_SCLK);
2339 
2340         sumo_construct_vid_mapping_table(rdev,
2341                          &pi->sys_info.vid_mapping_table,
2342                          igp_info->info_8.sAvail_SCLK);
2343 
2344         kv_construct_max_power_limits_table(rdev,
2345                             &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2346     }
2347     return 0;
2348 }
2349 
2350 union power_info {
2351     struct _ATOM_POWERPLAY_INFO info;
2352     struct _ATOM_POWERPLAY_INFO_V2 info_2;
2353     struct _ATOM_POWERPLAY_INFO_V3 info_3;
2354     struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2355     struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2356     struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2357 };
2358 
2359 union pplib_clock_info {
2360     struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2361     struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2362     struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2363     struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2364 };
2365 
2366 union pplib_power_state {
2367     struct _ATOM_PPLIB_STATE v1;
2368     struct _ATOM_PPLIB_STATE_V2 v2;
2369 };
2370 
2371 static void kv_patch_boot_state(struct radeon_device *rdev,
2372                 struct kv_ps *ps)
2373 {
2374     struct kv_power_info *pi = kv_get_pi(rdev);
2375 
2376     ps->num_levels = 1;
2377     ps->levels[0] = pi->boot_pl;
2378 }
2379 
2380 static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2381                       struct radeon_ps *rps,
2382                       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2383                       u8 table_rev)
2384 {
2385     struct kv_ps *ps = kv_get_ps(rps);
2386 
2387     rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2388     rps->class = le16_to_cpu(non_clock_info->usClassification);
2389     rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2390 
2391     if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2392         rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2393         rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2394     } else {
2395         rps->vclk = 0;
2396         rps->dclk = 0;
2397     }
2398 
2399     if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2400         rdev->pm.dpm.boot_ps = rps;
2401         kv_patch_boot_state(rdev, ps);
2402     }
2403     if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2404         rdev->pm.dpm.uvd_ps = rps;
2405 }
2406 
2407 static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2408                       struct radeon_ps *rps, int index,
2409                     union pplib_clock_info *clock_info)
2410 {
2411     struct kv_power_info *pi = kv_get_pi(rdev);
2412     struct kv_ps *ps = kv_get_ps(rps);
2413     struct kv_pl *pl = &ps->levels[index];
2414     u32 sclk;
2415 
2416     sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2417     sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2418     pl->sclk = sclk;
2419     pl->vddc_index = clock_info->sumo.vddcIndex;
2420 
2421     ps->num_levels = index + 1;
2422 
2423     if (pi->caps_sclk_ds) {
2424         pl->ds_divider_index = 5;
2425         pl->ss_divider_index = 5;
2426     }
2427 }
2428 
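/*
 * Walk the ATOM PPLib power-play table: allocate one radeon_ps per state,
 * parse up to SUMO_MAX_HARDWARE_POWERLEVELS clock entries per state, let
 * the non-clock info tag the boot and UVD states, and finally fill in the
 * sclk of each VCE state from its clock-info index.
 */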
2429 static int kv_parse_power_table(struct radeon_device *rdev)
2430 {
2431     struct radeon_mode_info *mode_info = &rdev->mode_info;
2432     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2433     union pplib_power_state *power_state;
2434     int i, j, k, non_clock_array_index, clock_array_index;
2435     union pplib_clock_info *clock_info;
2436     struct _StateArray *state_array;
2437     struct _ClockInfoArray *clock_info_array;
2438     struct _NonClockInfoArray *non_clock_info_array;
2439     union power_info *power_info;
2440     int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2441     u16 data_offset;
2442     u8 frev, crev;
2443     u8 *power_state_offset;
2444     struct kv_ps *ps;
2445 
2446     if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2447                    &frev, &crev, &data_offset))
2448         return -EINVAL;
2449     power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2450 
2451     state_array = (struct _StateArray *)
2452         (mode_info->atom_context->bios + data_offset +
2453          le16_to_cpu(power_info->pplib.usStateArrayOffset));
2454     clock_info_array = (struct _ClockInfoArray *)
2455         (mode_info->atom_context->bios + data_offset +
2456          le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2457     non_clock_info_array = (struct _NonClockInfoArray *)
2458         (mode_info->atom_context->bios + data_offset +
2459          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2460 
2461     rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
2462                   sizeof(struct radeon_ps),
2463                   GFP_KERNEL);
2464     if (!rdev->pm.dpm.ps)
2465         return -ENOMEM;
2466     power_state_offset = (u8 *)state_array->states;
2467     for (i = 0; i < state_array->ucNumEntries; i++) {
2468         u8 *idx;
2469         power_state = (union pplib_power_state *)power_state_offset;
2470         non_clock_array_index = power_state->v2.nonClockInfoIndex;
2471         non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2472             &non_clock_info_array->nonClockInfo[non_clock_array_index];
2473         if (!rdev->pm.power_state[i].clock_info)
2474             return -EINVAL;
2475         ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2476         if (ps == NULL) {
2477             kfree(rdev->pm.dpm.ps);
2478             return -ENOMEM;
2479         }
2480         rdev->pm.dpm.ps[i].ps_priv = ps;
2481         k = 0;
2482         idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2483         for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2484             clock_array_index = idx[j];
2485             if (clock_array_index >= clock_info_array->ucNumEntries)
2486                 continue;
2487             if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2488                 break;
2489             clock_info = (union pplib_clock_info *)
2490                 ((u8 *)&clock_info_array->clockInfo[0] +
2491                  (clock_array_index * clock_info_array->ucEntrySize));
2492             kv_parse_pplib_clock_info(rdev,
2493                           &rdev->pm.dpm.ps[i], k,
2494                           clock_info);
2495             k++;
2496         }
2497         kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2498                           non_clock_info,
2499                           non_clock_info_array->ucEntrySize);
2500         power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2501     }
2502     rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2503 
2504     /* fill in the vce power states */
2505     for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
2506         u32 sclk;
2507         clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
2508         clock_info = (union pplib_clock_info *)
2509             &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2510         sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2511         sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2512         rdev->pm.dpm.vce_states[i].sclk = sclk;
2513         rdev->pm.dpm.vce_states[i].mclk = 0;
2514     }
2515 
2516     return 0;
2517 }
2518 
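/*
 * One-time DPM setup: allocate kv_power_info, read the platform caps and
 * power tables, convert the voltage tables to real voltages, construct the
 * boot state and set the default feature caps (BAPM defaults to on only
 * for Kabini/Mullins unless overridden by the radeon_bapm module option).
 */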
2519 int kv_dpm_init(struct radeon_device *rdev)
2520 {
2521     struct kv_power_info *pi;
2522     int ret, i;
2523 
2524     pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2525     if (pi == NULL)
2526         return -ENOMEM;
2527     rdev->pm.dpm.priv = pi;
2528 
2529     ret = r600_get_platform_caps(rdev);
2530     if (ret)
2531         return ret;
2532 
2533     ret = r600_parse_extended_power_table(rdev);
2534     if (ret)
2535         return ret;
2536 
2537     for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2538         pi->at[i] = TRINITY_AT_DFLT;
2539 
2540     pi->sram_end = SMC_RAM_END;
2541 
2542     /* Enabling NB DPM on an ASRock system prevents DPM from working */
2543     if (rdev->pdev->subsystem_vendor == 0x1849)
2544         pi->enable_nb_dpm = false;
2545     else
2546         pi->enable_nb_dpm = true;
2547 
2548     pi->caps_power_containment = true;
2549     pi->caps_cac = true;
2550     pi->enable_didt = false;
2551     if (pi->enable_didt) {
2552         pi->caps_sq_ramping = true;
2553         pi->caps_db_ramping = true;
2554         pi->caps_td_ramping = true;
2555         pi->caps_tcp_ramping = true;
2556     }
2557 
2558     pi->caps_sclk_ds = true;
2559     pi->enable_auto_thermal_throttling = true;
2560     pi->disable_nb_ps3_in_battery = false;
2561     if (radeon_bapm == -1) {
2562         /* only enable bapm on KB, ML by default */
2563         if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2564             pi->bapm_enable = true;
2565         else
2566             pi->bapm_enable = false;
2567     } else if (radeon_bapm == 0) {
2568         pi->bapm_enable = false;
2569     } else {
2570         pi->bapm_enable = true;
2571     }
2572     pi->voltage_drop_t = 0;
2573     pi->caps_sclk_throttle_low_notification = false;
2574     pi->caps_fps = false; /* true? */
2575     pi->caps_uvd_pg = true;
2576     pi->caps_uvd_dpm = true;
2577     pi->caps_vce_pg = false; /* XXX true */
2578     pi->caps_samu_pg = false;
2579     pi->caps_acp_pg = false;
2580     pi->caps_stable_p_state = false;
2581 
2582     ret = kv_parse_sys_info_table(rdev);
2583     if (ret)
2584         return ret;
2585 
2586     kv_patch_voltage_values(rdev);
2587     kv_construct_boot_state(rdev);
2588 
2589     ret = kv_parse_power_table(rdev);
2590     if (ret)
2591         return ret;
2592 
2593     pi->enable_dpm = true;
2594 
2595     return 0;
2596 }
2597 
2598 void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2599                             struct seq_file *m)
2600 {
2601     struct kv_power_info *pi = kv_get_pi(rdev);
2602     u32 current_index =
2603         (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2604         CURR_SCLK_INDEX_SHIFT;
2605     u32 sclk, tmp;
2606     u16 vddc;
2607 
2608     if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2609         seq_printf(m, "invalid dpm profile %d\n", current_index);
2610     } else {
2611         sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2612         tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2613             SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2614         vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2615         seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2616         seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
2617         seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
2618                current_index, sclk, vddc);
2619     }
2620 }
2621 
2622 u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
2623 {
2624     struct kv_power_info *pi = kv_get_pi(rdev);
2625     u32 current_index =
2626         (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2627         CURR_SCLK_INDEX_SHIFT;
2628     u32 sclk;
2629 
2630     if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2631         return 0;
2632     } else {
2633         sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2634         return sclk;
2635     }
2636 }
2637 
2638 u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
2639 {
2640     struct kv_power_info *pi = kv_get_pi(rdev);
2641 
2642     return pi->sys_info.bootup_uma_clk;
2643 }
2644 
2645 void kv_dpm_print_power_state(struct radeon_device *rdev,
2646                   struct radeon_ps *rps)
2647 {
2648     int i;
2649     struct kv_ps *ps = kv_get_ps(rps);
2650 
2651     r600_dpm_print_class_info(rps->class, rps->class2);
2652     r600_dpm_print_cap_info(rps->caps);
2653     printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2654     for (i = 0; i < ps->num_levels; i++) {
2655         struct kv_pl *pl = &ps->levels[i];
2656         printk("\t\tpower level %d    sclk: %u vddc: %u\n",
2657                i, pl->sclk,
2658                kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2659     }
2660     r600_dpm_print_ps_status(rdev, rps);
2661 }
2662 
2663 void kv_dpm_fini(struct radeon_device *rdev)
2664 {
2665     int i;
2666 
2667     for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2668         kfree(rdev->pm.dpm.ps[i].ps_priv);
2669     }
2670     kfree(rdev->pm.dpm.ps);
2671     kfree(rdev->pm.dpm.priv);
2672     r600_free_extended_power_table(rdev);
2673 }
2674 
2675 void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2676 {
2677 
2678 }
2679 
2680 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2681 {
2682     struct kv_power_info *pi = kv_get_pi(rdev);
2683     struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2684 
2685     if (low)
2686         return requested_state->levels[0].sclk;
2687     else
2688         return requested_state->levels[requested_state->num_levels - 1].sclk;
2689 }
2690 
2691 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2692 {
2693     struct kv_power_info *pi = kv_get_pi(rdev);
2694 
2695     return pi->sys_info.bootup_uma_clk;
2696 }
2697