0001 /*
0002  * Copyright 2012-16 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  * Authors: AMD
0023  *
0024  */
0025 
0026 #include <linux/slab.h>
0027 
0028 #include "dce_clk_mgr.h"
0029 
0030 #include "reg_helper.h"
0031 #include "dmcu.h"
0032 #include "core_types.h"
0033 #include "dal_asic_id.h"
0034 
/* Recover the dce_clk_mgr container from an embedded base clk_mgr pointer. */
#define TO_DCE_CLK_MGR(clocks)\
    container_of(clocks, struct dce_clk_mgr, base)

/* Register helpers below expect a local variable named clk_mgr_dce in scope. */
#define REG(reg) \
    (clk_mgr_dce->regs->reg)

#undef FN
/* Expand a register field into its shift/mask pair for the REG_GET family. */
#define FN(reg_name, field_name) \
    clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name

/* CTX is consumed by the shared reg_helper macros; DC_LOGGER by the logging
 * macros and expects a local variable named clk_mgr in scope.
 */
#define CTX \
    clk_mgr_dce->base.ctx
#define DC_LOGGER \
    clk_mgr->ctx->logger
0049 
/* Max clock values for each state indexed by "enum clocks_state": */
/* DCE 8.0 per-state clock limits; all values in kHz. */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
0062 
/* DCE 11.0 per-state clock limits; all values in kHz. */
static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
0074 
/* DCE 11.2 per-state clock limits; all values in kHz. */
static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
0086 
/* DCE 12.0 per-state clock limits; all values in kHz. */
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateLow*/
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
0098 
0099 int dentist_get_divider_from_did(int did)
0100 {
0101     if (did < DENTIST_BASE_DID_1)
0102         did = DENTIST_BASE_DID_1;
0103     if (did > DENTIST_MAX_DID)
0104         did = DENTIST_MAX_DID;
0105 
0106     if (did < DENTIST_BASE_DID_2) {
0107         return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
0108                             * (did - DENTIST_BASE_DID_1);
0109     } else if (did < DENTIST_BASE_DID_3) {
0110         return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
0111                             * (did - DENTIST_BASE_DID_2);
0112     } else if (did < DENTIST_BASE_DID_4) {
0113         return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
0114                             * (did - DENTIST_BASE_DID_3);
0115     } else {
0116         return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
0117                             * (did - DENTIST_BASE_DID_4);
0118     }
0119 }
0120 
0121 /* SW will adjust DP REF Clock average value for all purposes
0122  * (DP DTO / DP Audio DTO and DP GTC)
0123  if clock is spread for all cases:
0124  -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
0125  calculations for DS_INCR/DS_MODULO (this is planned to be default case)
0126  -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
0127  calculations (not planned to be used, but average clock should still
0128  be valid)
0129  -if SS enabled on DP Ref clock and HW de-spreading disabled
0130  (should not be case with CIK) then SW should program all rates
0131  generated according to average value (case as with previous ASICs)
0132   */
0133 static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
0134 {
0135     if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
0136         struct fixed31_32 ss_percentage = dc_fixpt_div_int(
0137                 dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
0138                             clk_mgr_dce->dprefclk_ss_divider), 200);
0139         struct fixed31_32 adj_dp_ref_clk_khz;
0140 
0141         ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
0142         adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
0143         dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
0144     }
0145     return dp_ref_clk_khz;
0146 }
0147 
/*
 * Compute the current DP reference clock in kHz by reading back the
 * DENTIST DPREFCLK divider from hardware and scaling the DENTIST VCO
 * frequency, then applying the spread-spectrum average adjustment.
 */
static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    int dprefclk_wdivider;
    int dprefclk_src_sel;
    int dp_ref_clk_khz = 600000;
    int target_div;

    /* ASSERT DP Reference Clock source is from DFS*/
    REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
    ASSERT(dprefclk_src_sel == 0);

    /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
     * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
    REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);

    /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider value */
    target_div = dentist_get_divider_from_did(dprefclk_wdivider);

    /* Calculate the current DFS clock, in kHz.*/
    dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
        * clk_mgr_dce->dentist_vco_freq_khz) / target_div;

    return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
}
0173 
0174 int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
0175 {
0176     struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
0177 
0178     return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
0179 }
0180 
0181 /* unit: in_khz before mode set, get pixel clock from context. ASIC register
0182  * may not be programmed yet
0183  */
0184 static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
0185 {
0186     uint32_t max_pix_clk = 0;
0187     int i;
0188 
0189     for (i = 0; i < MAX_PIPES; i++) {
0190         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
0191 
0192         if (pipe_ctx->stream == NULL)
0193             continue;
0194 
0195         /* do not check under lay */
0196         if (pipe_ctx->top_pipe)
0197             continue;
0198 
0199         if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
0200             max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
0201 
0202         /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
0203          * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
0204          */
0205         if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
0206                 pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
0207             max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
0208     }
0209 
0210     return max_pix_clk;
0211 }
0212 
0213 static enum dm_pp_clocks_state dce_get_required_clocks_state(
0214     struct clk_mgr *clk_mgr,
0215     struct dc_state *context)
0216 {
0217     struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
0218     int i;
0219     enum dm_pp_clocks_state low_req_clk;
0220     int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
0221 
0222     /* Iterate from highest supported to lowest valid state, and update
0223      * lowest RequiredState with the lowest state that satisfies
0224      * all required clocks
0225      */
0226     for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
0227         if (context->bw_ctx.bw.dce.dispclk_khz >
0228                 clk_mgr_dce->max_clks_by_state[i].display_clk_khz
0229             || max_pix_clk >
0230                 clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
0231             break;
0232 
0233     low_req_clk = i + 1;
0234     if (low_req_clk > clk_mgr_dce->max_clks_state) {
0235         /* set max clock state for high phyclock, invalid on exceeding display clock */
0236         if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
0237                 < context->bw_ctx.bw.dce.dispclk_khz)
0238             low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
0239         else
0240             low_req_clk = clk_mgr_dce->max_clks_state;
0241     }
0242 
0243     return low_req_clk;
0244 }
0245 
/*
 * Program the display engine clock through VBIOS and return the clock
 * actually in effect (which differs from the request when DFS bypass is
 * active).  Also refreshes the DMCU PSR wait loop for the new clock.
 */
static int dce_set_clock(
    struct clk_mgr *clk_mgr,
    int requested_clk_khz)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
    struct dc_bios *bp = clk_mgr->ctx->dc_bios;
    int actual_clock = requested_clk_khz;
    struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;

    /* Make sure requested clock isn't lower than minimum threshold
     * (DENTIST VCO frequency / 64); a request of 0 is left untouched.
     */
    if (requested_clk_khz > 0)
        requested_clk_khz = max(requested_clk_khz,
                clk_mgr_dce->dentist_vco_freq_khz / 64);

    /* Prepare to program display clock*/
    pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
    pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

    if (clk_mgr_dce->dfs_bypass_active)
        pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;

    bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

    if (clk_mgr_dce->dfs_bypass_active) {
        /* Cache the fixed display clock reported back by VBIOS */
        clk_mgr_dce->dfs_bypass_disp_clk =
            pxl_clk_params.dfs_bypass_display_clock;
        actual_clock = pxl_clk_params.dfs_bypass_display_clock;
    }

    /* from power down, we need mark the clock state as ClocksStateNominal
     * from HWReset, so when resume we will call pplib voltage regulator.*/
    if (requested_clk_khz == 0)
        clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

    /* actual_clock / 1000 / 7: presumably converts kHz to the DMCU wait
     * loop unit -- TODO confirm against DMCU firmware interface.
     */
    if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
        dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);

    return actual_clock;
}
0287 
/*
 * DCE 11.2+ display clock programming: sets DISPCLK through the
 * SetDceClock VBIOS command table, then issues a second call for
 * DPREFCLK (frequency chosen by VBIOS).  Returns the display clock
 * actually programmed, as reported back by VBIOS.
 */
int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    struct bp_set_dce_clock_parameters dce_clk_params;
    struct dc_bios *bp = clk_mgr->ctx->dc_bios;
    struct dc *core_dc = clk_mgr->ctx->dc;
    struct dmcu *dmcu = core_dc->res_pool->dmcu;
    int actual_clock = requested_clk_khz;
    /* Prepare to program display clock*/
    memset(&dce_clk_params, 0, sizeof(dce_clk_params));

    /* Make sure requested clock isn't lower than minimum threshold
     * (DENTIST VCO frequency / 62); a request of 0 is left untouched.
     */
    if (requested_clk_khz > 0)
        requested_clk_khz = max(requested_clk_khz,
                clk_mgr_dce->dentist_vco_freq_khz / 62);

    dce_clk_params.target_clock_frequency = requested_clk_khz;
    dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
    dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;

    bp->funcs->set_dce_clock(bp, &dce_clk_params);
    actual_clock = dce_clk_params.target_clock_frequency;

    /* from power down, we need mark the clock state as ClocksStateNominal
     * from HWReset, so when resume we will call pplib voltage regulator.*/
    if (requested_clk_khz == 0)
        clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

    /*Program DP ref Clock*/
    /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
    dce_clk_params.target_clock_frequency = 0;
    dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;

    /* Vega20 never uses GENLOCK as the DPREFCLK source */
    if (!((clk_mgr->ctx->asic_id.chip_family == FAMILY_AI) &&
           ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev)))
        dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
            (dce_clk_params.pll_id ==
                    CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
    else
        dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;

    bp->funcs->set_dce_clock(bp, &dce_clk_params);

    /* Skip DMCU notification on FPGA; only notify when the clock changed */
    if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
        if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
            if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
                dmcu->funcs->set_psr_wait_loop(dmcu,
                        actual_clock / 1000 / 7);
        }
    }

    clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
    return actual_clock;
}
0342 
/*
 * Read APU integrated info from VBIOS: determine the DENTIST VCO
 * frequency (with firmware-info and hard-coded 3.6 GHz fallbacks),
 * override the per-state max display clocks with the VBIOS voltage
 * table, and latch whether DFS bypass may be enabled.
 */
static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
{
    struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
    struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
    struct integrated_info info = { { { 0 } } };
    struct dc_firmware_info fw_info = { { 0 } };
    int i;

    if (bp->integrated_info)
        info = *bp->integrated_info;

    clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
    if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
        /* fall back to firmware info, then to a 3.6 GHz default */
        bp->funcs->get_firmware_info(bp, &fw_info);
        clk_mgr_dce->dentist_vco_freq_khz =
            fw_info.smu_gpu_pll_output_freq;
        if (clk_mgr_dce->dentist_vco_freq_khz == 0)
            clk_mgr_dce->dentist_vco_freq_khz = 3600000;
    }

    /*update the maximum display clock for each power state*/
    for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
        enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;

        /* map voltage-table index to the corresponding clock state */
        switch (i) {
        case 0:
            clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
            break;

        case 1:
            clk_state = DM_PP_CLOCKS_STATE_LOW;
            break;

        case 2:
            clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
            break;

        case 3:
            clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
            break;

        default:
            clk_state = DM_PP_CLOCKS_STATE_INVALID;
            break;
        }

        /*Do not allow bad VBIOS/SBIOS to override with invalid values,
         * check for > 100MHz*/
        if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
            clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
                info.disp_clk_voltage[i].max_supported_clk;
    }

    if (!debug->disable_dfs_bypass && bp->integrated_info)
        if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
            clk_mgr_dce->dfs_bypass_enabled = true;
}
0400 
/*
 * Query VBIOS for spread-spectrum settings that affect the DP reference
 * clock.  The GPU PLL SS entry is tried first; if it is absent or shows
 * a zero spread percentage, the DisplayPort SS entry is used instead.
 * On success this records the SS divider/percentage used later by
 * clk_mgr_adjust_dp_ref_freq_for_ss().
 */
void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
{
    struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
    int ss_info_num = bp->funcs->get_ss_entry_number(
            bp, AS_SIGNAL_TYPE_GPU_PLL);

    if (ss_info_num) {
        struct spread_spectrum_info info = { { 0 } };
        enum bp_result result = bp->funcs->get_spread_spectrum_info(
                bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);

        /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
         * even if SS not enabled and in that case
         * SSInfo.spreadSpectrumPercentage !=0 would be sign
         * that SS is enabled
         */
        if (result == BP_RESULT_OK &&
                info.spread_spectrum_percentage != 0) {
            clk_mgr_dce->ss_on_dprefclk = true;
            clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

            if (info.type.CENTER_MODE == 0) {
                /* TODO: Currently for DP Reference clock we
                 * need only SS percentage for
                 * downspread */
                clk_mgr_dce->dprefclk_ss_percentage =
                        info.spread_spectrum_percentage;
            }

            /* GPU PLL entry found and valid: no need to fall back */
            return;
        }

        /* fall back to the DisplayPort SS entry */
        result = bp->funcs->get_spread_spectrum_info(
                bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);

        /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
         * even if SS not enabled and in that case
         * SSInfo.spreadSpectrumPercentage !=0 would be sign
         * that SS is enabled
         */
        if (result == BP_RESULT_OK &&
                info.spread_spectrum_percentage != 0) {
            clk_mgr_dce->ss_on_dprefclk = true;
            clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

            if (info.type.CENTER_MODE == 0) {
                /* Currently for DP Reference clock we
                 * need only SS percentage for
                 * downspread */
                clk_mgr_dce->dprefclk_ss_percentage =
                        info.spread_spectrum_percentage;
            }
            /* debug override: treat DPREFCLK as if it had no spread */
            if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
                clk_mgr_dce->dprefclk_ss_percentage = 0;
        }
    }
}
0458 
0459 /**
0460  * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
0461  * @clk_mgr: clock manager base structure
0462  *
0463  * Reads from VBIOS the XGMI spread spectrum info and saves it within
0464  * the dce clock manager. This operation will overwrite the existing dprefclk
0465  * SS values if the vBIOS query succeeds. Otherwise, it does nothing. It also
0466  * sets the ->xgmi_enabled flag.
0467  */
0468 void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr)
0469 {
0470     struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
0471     enum bp_result result;
0472     struct spread_spectrum_info info = { { 0 } };
0473     struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
0474 
0475     clk_mgr_dce->xgmi_enabled = false;
0476 
0477     result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
0478                              0, &info);
0479     if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
0480         clk_mgr_dce->xgmi_enabled = true;
0481         clk_mgr_dce->ss_on_dprefclk = true;
0482         clk_mgr_dce->dprefclk_ss_divider =
0483                 info.spread_percentage_divider;
0484 
0485         if (info.type.CENTER_MODE == 0) {
0486             /* Currently for DP Reference clock we
0487              * need only SS percentage for
0488              * downspread */
0489             clk_mgr_dce->dprefclk_ss_percentage =
0490                     info.spread_spectrum_percentage;
0491         }
0492     }
0493 }
0494 
0495 void dce110_fill_display_configs(
0496     const struct dc_state *context,
0497     struct dm_pp_display_configuration *pp_display_cfg)
0498 {
0499     int j;
0500     int num_cfgs = 0;
0501 
0502     for (j = 0; j < context->stream_count; j++) {
0503         int k;
0504 
0505         const struct dc_stream_state *stream = context->streams[j];
0506         struct dm_pp_single_disp_config *cfg =
0507             &pp_display_cfg->disp_configs[num_cfgs];
0508         const struct pipe_ctx *pipe_ctx = NULL;
0509 
0510         for (k = 0; k < MAX_PIPES; k++)
0511             if (stream == context->res_ctx.pipe_ctx[k].stream) {
0512                 pipe_ctx = &context->res_ctx.pipe_ctx[k];
0513                 break;
0514             }
0515 
0516         ASSERT(pipe_ctx != NULL);
0517 
0518         /* only notify active stream */
0519         if (stream->dpms_off)
0520             continue;
0521 
0522         num_cfgs++;
0523         cfg->signal = pipe_ctx->stream->signal;
0524         cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
0525         cfg->src_height = stream->src.height;
0526         cfg->src_width = stream->src.width;
0527         cfg->ddi_channel_mapping =
0528             stream->link->ddi_channel_mapping.raw;
0529         cfg->transmitter =
0530             stream->link->link_enc->transmitter;
0531         cfg->link_settings.lane_count =
0532             stream->link->cur_link_settings.lane_count;
0533         cfg->link_settings.link_rate =
0534             stream->link->cur_link_settings.link_rate;
0535         cfg->link_settings.link_spread =
0536             stream->link->cur_link_settings.link_spread;
0537         cfg->sym_clock = stream->phy_pix_clk;
0538         /* Round v_refresh*/
0539         cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
0540         cfg->v_refresh /= stream->timing.h_total;
0541         cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
0542                             / stream->timing.v_total;
0543     }
0544 
0545     pp_display_cfg->display_count = num_cfgs;
0546 }
0547 
0548 static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
0549 {
0550     uint8_t j;
0551     uint32_t min_vertical_blank_time = -1;
0552 
0553     for (j = 0; j < context->stream_count; j++) {
0554         struct dc_stream_state *stream = context->streams[j];
0555         uint32_t vertical_blank_in_pixels = 0;
0556         uint32_t vertical_blank_time = 0;
0557 
0558         vertical_blank_in_pixels = stream->timing.h_total *
0559             (stream->timing.v_total
0560              - stream->timing.v_addressable);
0561 
0562         vertical_blank_time = vertical_blank_in_pixels
0563             * 10000 / stream->timing.pix_clk_100hz;
0564 
0565         if (min_vertical_blank_time > vertical_blank_time)
0566             min_vertical_blank_time = vertical_blank_time;
0567     }
0568 
0569     return min_vertical_blank_time;
0570 }
0571 
/*
 * Snap a required sclk to the lowest bounding-box level that covers it.
 * Returns the request unchanged when the ASIC reports no sclk levels.
 */
static int determine_sclk_from_bounding_box(
        const struct dc *dc,
        int required_sclk)
{
    int i;

    /*
     * Some asics do not give us sclk levels, so we just report the actual
     * required sclk
     */
    if (dc->sclk_lvls.num_levels == 0)
        return required_sclk;

    /* levels are searched in order; first level >= request wins */
    for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
        if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
            return dc->sclk_lvls.clocks_in_khz[i];
    }
    /*
     * even maximum level could not satisfy requirement, this
     * is unexpected at this stage, should have been caught at
     * validation time
     */
    ASSERT(0);
    return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
}
0597 
/*
 * Build the PPLib display configuration from @context and hand it to
 * PPLib, but only when it differs from what was last applied.
 */
static void dce_pplib_apply_display_requirements(
    struct dc *dc,
    struct dc_state *context)
{
    struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

    /* shortest vblank across all streams bounds the mclk-switch window */
    pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);

    dce110_fill_display_configs(context, pp_display_cfg);

    /* avoid redundant PPLib notifications: only send on change */
    if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
        dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
0611 
/*
 * DCE11 variant of the PPLib notification: in addition to the display
 * configs it reports sync/p-state/cc6 capabilities and the minimum
 * memory, engine, and dcf clocks derived from the bandwidth context.
 * Like the DCE variant, PPLib is only called when something changed.
 */
static void dce11_pplib_apply_display_requirements(
    struct dc *dc,
    struct dc_state *context)
{
    struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

    pp_display_cfg->all_displays_in_sync =
        context->bw_ctx.bw.dce.all_displays_in_sync;
    pp_display_cfg->nb_pstate_switch_disable =
            context->bw_ctx.bw.dce.nbp_state_change_enable == false;
    pp_display_cfg->cpu_cc6_disable =
            context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
    pp_display_cfg->cpu_pstate_disable =
            context->bw_ctx.bw.dce.cpup_state_change_enable == false;
    pp_display_cfg->cpu_pstate_separation_time =
            context->bw_ctx.bw.dce.blackout_recovery_time_us;

    /* yclk is the memory-interface rate; divide by the memory-type
     * multiplier to get the memory clock proper.
     */
    pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
        / MEMORY_TYPE_MULTIPLIER_CZ;

    pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
            dc,
            context->bw_ctx.bw.dce.sclk_khz);

    /*
     * As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
     * This is not required for less than 5 displays,
     * thus don't request decfclk in dc to avoid impact
     * on power saving.
     *
     */
    pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
            pp_display_cfg->min_engine_clock_khz : 0;

    pp_display_cfg->min_engine_clock_deep_sleep_khz
            = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;

    pp_display_cfg->avail_mclk_switch_time_us =
                        dce110_get_min_vblank_time_us(context);
    /* TODO: dce11.2*/
    pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;

    pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;

    dce110_fill_display_configs(context, pp_display_cfg);

    /* TODO: is this still applicable?*/
    if (pp_display_cfg->display_count == 1) {
        const struct dc_crtc_timing *timing =
            &context->streams[0]->timing;

        pp_display_cfg->crtc_index =
            pp_display_cfg->disp_configs[0].pipe_idx;
        /* line time in us; pix_clk is in 100Hz units */
        pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
    }

    /* avoid redundant PPLib notifications: only send on change */
    if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
        dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
0671 
/*
 * DCE update path: raise (always) or lower (only when safe_to_lower) the
 * PPLib power level, reprogram DISPCLK when it changed, then republish
 * the display requirements to PPLib.
 */
static void dce_update_clocks(struct clk_mgr *clk_mgr,
            struct dc_state *context,
            bool safe_to_lower)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    struct dm_pp_power_level_change_request level_change_req;
    int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

    /*TODO: W/A for dal3 linux, investigate why this works */
    if (!clk_mgr_dce->dfs_bypass_active)
        patched_disp_clk = patched_disp_clk * 115 / 100;

    level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
    /* get max clock state from PPLIB */
    if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
            || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
        if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
            clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
    }

    if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
        patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk);
        clk_mgr->clks.dispclk_khz = patched_disp_clk;
    }
    dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
0698 
/*
 * DCE11 update path: same power-level handling as dce_update_clocks, but
 * the actual clock returned by dce_set_clock() is written back into the
 * bandwidth context, and the DCE11 PPLib notification is used.
 */
static void dce11_update_clocks(struct clk_mgr *clk_mgr,
            struct dc_state *context,
            bool safe_to_lower)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    struct dm_pp_power_level_change_request level_change_req;
    int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

    /*TODO: W/A for dal3 linux, investigate why this works */
    if (!clk_mgr_dce->dfs_bypass_active)
        patched_disp_clk = patched_disp_clk * 115 / 100;

    level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
    /* get max clock state from PPLIB */
    if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
            || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
        if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
            clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
    }

    if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
        context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
        clk_mgr->clks.dispclk_khz = patched_disp_clk;
    }
    dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
0725 
/*
 * DCE11.2 update path: identical flow to dce_update_clocks, but DISPCLK
 * is programmed through the SetDceClock command table (dce112_set_clock)
 * and the DCE11 PPLib notification is used.
 */
static void dce112_update_clocks(struct clk_mgr *clk_mgr,
            struct dc_state *context,
            bool safe_to_lower)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    struct dm_pp_power_level_change_request level_change_req;
    int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

    /*TODO: W/A for dal3 linux, investigate why this works */
    if (!clk_mgr_dce->dfs_bypass_active)
        patched_disp_clk = patched_disp_clk * 115 / 100;

    level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
    /* get max clock state from PPLIB */
    if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
            || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
        if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
            clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
    }

    if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
        patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk);
        clk_mgr->clks.dispclk_khz = patched_disp_clk;
    }
    dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
0752 
/*
 * Program display and PHY clocks for a DCE 12.x context.
 *
 * Unlike earlier DCE generations, clock/voltage pairs are requested from
 * PPLIB via dm_pp_apply_clock_for_voltage_request(); the same request
 * struct is reused for the display clock and then the PHY clock, so the
 * order of the two if-blocks below must be preserved.
 */
static void dce12_update_clocks(struct clk_mgr *clk_mgr,
            struct dc_state *context,
            bool safe_to_lower)
{
    struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
    struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
    int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
    int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

    /*TODO: W/A for dal3 linux, investigate why this works */
    /* 15% padding of the requested dispclk unless DFS bypass is active. */
    if (!clk_mgr_dce->dfs_bypass_active)
        patched_disp_clk = patched_disp_clk * 115 / 100;

    if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
        clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
        /*
         * When xGMI is enabled, the display clk needs to be adjusted
         * with the WAFL link's SS percentage.
         */
        if (clk_mgr_dce->xgmi_enabled)
            patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss(
                    clk_mgr_dce, patched_disp_clk);
        clock_voltage_req.clocks_in_khz = patched_disp_clk;
        /* Cache the clock actually granted, which may differ from the request. */
        clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk);

        dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
    }

    /* PHY clock follows the highest pixel clock over all active paths. */
    if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
        clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
        clock_voltage_req.clocks_in_khz = max_pix_clk;
        clk_mgr->clks.phyclk_khz = max_pix_clk;

        dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
    }
    dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
0790 
/* Vtable for DCE 12.0/12.1: DP ref clock and clock updates go through the DCE12 paths. */
static const struct clk_mgr_funcs dce120_funcs = {
    .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
    .update_clocks = dce12_update_clocks
};
0795 
/* Vtable for DCE 11.2: generic DP ref clock read, DCE112 clock updates. */
static const struct clk_mgr_funcs dce112_funcs = {
    .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
    .update_clocks = dce112_update_clocks
};
0800 
/* Vtable for DCE 11.0: generic DP ref clock read, DCE11 clock updates. */
static const struct clk_mgr_funcs dce110_funcs = {
    .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
    .update_clocks = dce11_update_clocks,
};
0805 
/* Default vtable (DCE 8.x), installed by dce_clk_mgr_construct(). */
static const struct clk_mgr_funcs dce_funcs = {
    .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
    .update_clocks = dce_update_clocks
};
0810 
0811 static void dce_clk_mgr_construct(
0812     struct dce_clk_mgr *clk_mgr_dce,
0813     struct dc_context *ctx,
0814     const struct clk_mgr_registers *regs,
0815     const struct clk_mgr_shift *clk_shift,
0816     const struct clk_mgr_mask *clk_mask)
0817 {
0818     struct clk_mgr *base = &clk_mgr_dce->base;
0819     struct dm_pp_static_clock_info static_clk_info = {0};
0820 
0821     base->ctx = ctx;
0822     base->funcs = &dce_funcs;
0823 
0824     clk_mgr_dce->regs = regs;
0825     clk_mgr_dce->clk_mgr_shift = clk_shift;
0826     clk_mgr_dce->clk_mgr_mask = clk_mask;
0827 
0828     clk_mgr_dce->dfs_bypass_disp_clk = 0;
0829 
0830     clk_mgr_dce->dprefclk_ss_percentage = 0;
0831     clk_mgr_dce->dprefclk_ss_divider = 1000;
0832     clk_mgr_dce->ss_on_dprefclk = false;
0833 
0834 
0835     if (dm_pp_get_static_clocks(ctx, &static_clk_info))
0836         clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
0837     else
0838         clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
0839     clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
0840 
0841     dce_clock_read_integrated_info(clk_mgr_dce);
0842     dce_clock_read_ss_info(clk_mgr_dce);
0843 }
0844 
0845 struct clk_mgr *dce_clk_mgr_create(
0846     struct dc_context *ctx,
0847     const struct clk_mgr_registers *regs,
0848     const struct clk_mgr_shift *clk_shift,
0849     const struct clk_mgr_mask *clk_mask)
0850 {
0851     struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
0852 
0853     if (clk_mgr_dce == NULL) {
0854         BREAK_TO_DEBUGGER();
0855         return NULL;
0856     }
0857 
0858     memcpy(clk_mgr_dce->max_clks_by_state,
0859         dce80_max_clks_by_state,
0860         sizeof(dce80_max_clks_by_state));
0861 
0862     dce_clk_mgr_construct(
0863         clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
0864 
0865     return &clk_mgr_dce->base;
0866 }
0867 
0868 struct clk_mgr *dce110_clk_mgr_create(
0869     struct dc_context *ctx,
0870     const struct clk_mgr_registers *regs,
0871     const struct clk_mgr_shift *clk_shift,
0872     const struct clk_mgr_mask *clk_mask)
0873 {
0874     struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
0875 
0876     if (clk_mgr_dce == NULL) {
0877         BREAK_TO_DEBUGGER();
0878         return NULL;
0879     }
0880 
0881     memcpy(clk_mgr_dce->max_clks_by_state,
0882         dce110_max_clks_by_state,
0883         sizeof(dce110_max_clks_by_state));
0884 
0885     dce_clk_mgr_construct(
0886         clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
0887 
0888     clk_mgr_dce->base.funcs = &dce110_funcs;
0889 
0890     return &clk_mgr_dce->base;
0891 }
0892 
0893 struct clk_mgr *dce112_clk_mgr_create(
0894     struct dc_context *ctx,
0895     const struct clk_mgr_registers *regs,
0896     const struct clk_mgr_shift *clk_shift,
0897     const struct clk_mgr_mask *clk_mask)
0898 {
0899     struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
0900 
0901     if (clk_mgr_dce == NULL) {
0902         BREAK_TO_DEBUGGER();
0903         return NULL;
0904     }
0905 
0906     memcpy(clk_mgr_dce->max_clks_by_state,
0907         dce112_max_clks_by_state,
0908         sizeof(dce112_max_clks_by_state));
0909 
0910     dce_clk_mgr_construct(
0911         clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
0912 
0913     clk_mgr_dce->base.funcs = &dce112_funcs;
0914 
0915     return &clk_mgr_dce->base;
0916 }
0917 
0918 struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
0919 {
0920     struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
0921 
0922     if (clk_mgr_dce == NULL) {
0923         BREAK_TO_DEBUGGER();
0924         return NULL;
0925     }
0926 
0927     memcpy(clk_mgr_dce->max_clks_by_state,
0928         dce120_max_clks_by_state,
0929         sizeof(dce120_max_clks_by_state));
0930 
0931     dce_clk_mgr_construct(
0932         clk_mgr_dce, ctx, NULL, NULL, NULL);
0933 
0934     clk_mgr_dce->dprefclk_khz = 600000;
0935     clk_mgr_dce->base.funcs = &dce120_funcs;
0936 
0937     return &clk_mgr_dce->base;
0938 }
0939 
0940 struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx)
0941 {
0942     struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce),
0943                           GFP_KERNEL);
0944 
0945     if (clk_mgr_dce == NULL) {
0946         BREAK_TO_DEBUGGER();
0947         return NULL;
0948     }
0949 
0950     memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state,
0951            sizeof(dce120_max_clks_by_state));
0952 
0953     dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL);
0954 
0955     clk_mgr_dce->dprefclk_khz = 625000;
0956     clk_mgr_dce->base.funcs = &dce120_funcs;
0957 
0958     return &clk_mgr_dce->base;
0959 }
0960 
0961 void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
0962 {
0963     struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
0964 
0965     kfree(clk_mgr_dce);
0966     *clk_mgr = NULL;
0967 }