0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <drm/amdgpu_drm.h>
0025 #include "amdgpu.h"
0026 #include "atom.h"
0027 #include "atombios_encoders.h"
0028 #include "amdgpu_pll.h"
0029 #include <asm/div64.h>
0030 #include <linux/gcd.h>
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
/*
 * amdgpu_pll_reduce_ratio - bring nom/den to lowest terms, then scale up
 *
 * @nom: nominator (in/out)
 * @den: denominator (in/out)
 * @nom_min: minimum value the nominator must reach
 * @den_min: minimum value the denominator must reach
 *
 * Divides both values by their GCD, then multiplies both by the smallest
 * integer factors needed so that *nom >= nom_min and *den >= den_min.
 * The ratio nom/den is preserved throughout.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned divisor = gcd(*nom, *den);
	unsigned scale;

	/* reduce the fraction to its simplest form */
	*nom /= divisor;
	*den /= divisor;

	/* scale both terms up until the nominator hits its minimum */
	if (*nom < nom_min) {
		scale = DIV_ROUND_UP(nom_min, *nom);
		*nom *= scale;
		*den *= scale;
	}

	/* scale both terms up until the denominator hits its minimum */
	if (*den < den_min) {
		scale = DIV_ROUND_UP(den_min, *den);
		*nom *= scale;
		*den *= scale;
	}
}
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084 static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
0085 unsigned int den, unsigned int post_div,
0086 unsigned int fb_div_max, unsigned int ref_div_max,
0087 unsigned int *fb_div, unsigned int *ref_div)
0088 {
0089
0090
0091 if (adev->family == AMDGPU_FAMILY_SI)
0092 ref_div_max = min(100 / post_div, ref_div_max);
0093 else
0094 ref_div_max = min(128 / post_div, ref_div_max);
0095
0096
0097 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
0098 *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
0099
0100
0101 if (*fb_div > fb_div_max) {
0102 *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
0103 *fb_div = fb_div_max;
0104 }
0105 }
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
/**
 * amdgpu_pll_compute - compute PLL dividers for a requested frequency
 *
 * @adev: amdgpu device (forwarded to the divider-limit helper)
 * @pll: PLL constraints (divider ranges, VCO range, reference clock, flags)
 * @freq: requested frequency; the debug print below compares it against
 *        *dot_clock_p * 10, so it is expressed in one tenth of the
 *        dot-clock unit
 * @dot_clock_p: resulting dot clock (out)
 * @fb_div_p: resulting integer feedback divider (out)
 * @frac_fb_div_p: resulting fractional feedback divider, 0..9 (out)
 * @ref_div_p: resulting reference divider (out)
 * @post_div_p: resulting post divider (out)
 *
 * Searches the allowed post-divider range for the combination of
 * feedback/reference/post dividers that best approximates
 * freq ~= reference_freq * fb_div / (ref_div * post_div).
 */
void amdgpu_pll_compute(struct amdgpu_device *adev,
			struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	/* with a fractional fb divider the whole search runs with one extra
	 * decimal digit of precision (everything scaled by 10) */
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine the allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		/* scale limits to match the x10 fixed-point representation */
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine the allowed reference divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	/* the reference divider is only pinned to pll->reference_div when
	 * BOTH flags are set; with USE_REF_DIV alone the maximum stays at
	 * pll->max_ref_div */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine the allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		/* caller fixed the post divider */
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		/* smallest post divider that keeps the VCO at/above vco_min
		 * (rounded up), clamped to the hardware minimum */
		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		/* largest post divider that keeps the VCO at/below vco_max
		 * (rounded down), clamped to the hardware maximum */
		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the target as the ratio target_clock / reference_freq */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* select the post divider giving the lowest frequency error; ties
	 * keep the first (smallest) hit only when PREFER_MINM_OVER_MAXP */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
					 !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* recompute the dividers for the chosen post divider */
	amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce fb_div/ref_div while respecting their minimums */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* when a fractional part remains, raise the effective fb_div minimum
	 * (larger for fractional parts close to 9) and rescale both dividers;
	 * NOTE(review): the constant (9 - frac) * 20 + 60 looks like a
	 * jitter-avoidance heuristic for small fractional values — confirm
	 * against hardware documentation */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* split the x10 fixed-point fb_div back into integer + fraction */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	/* actual dot clock produced by the chosen dividers */
	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}
0261
0262
0263
0264
0265
0266
0267
0268
0269 u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
0270 {
0271 struct drm_device *dev = crtc->dev;
0272 struct drm_crtc *test_crtc;
0273 struct amdgpu_crtc *test_amdgpu_crtc;
0274 u32 pll_in_use = 0;
0275
0276 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
0277 if (crtc == test_crtc)
0278 continue;
0279
0280 test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
0281 if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
0282 pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
0283 }
0284 return pll_in_use;
0285 }
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296 int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
0297 {
0298 struct drm_device *dev = crtc->dev;
0299 struct drm_crtc *test_crtc;
0300 struct amdgpu_crtc *test_amdgpu_crtc;
0301
0302 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
0303 if (crtc == test_crtc)
0304 continue;
0305 test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
0306 if (test_amdgpu_crtc->encoder &&
0307 ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
0308
0309 if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
0310 return test_amdgpu_crtc->pll_id;
0311 }
0312 }
0313 return ATOM_PPLL_INVALID;
0314 }
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324 int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
0325 {
0326 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
0327 struct drm_device *dev = crtc->dev;
0328 struct drm_crtc *test_crtc;
0329 struct amdgpu_crtc *test_amdgpu_crtc;
0330 u32 adjusted_clock, test_adjusted_clock;
0331
0332 adjusted_clock = amdgpu_crtc->adjusted_clock;
0333
0334 if (adjusted_clock == 0)
0335 return ATOM_PPLL_INVALID;
0336
0337 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
0338 if (crtc == test_crtc)
0339 continue;
0340 test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
0341 if (test_amdgpu_crtc->encoder &&
0342 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
0343
0344 if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
0345
0346 if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
0347 return test_amdgpu_crtc->pll_id;
0348 }
0349
0350 test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
0351 if ((crtc->mode.clock == test_crtc->mode.clock) &&
0352 (adjusted_clock == test_adjusted_clock) &&
0353 (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
0354 (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
0355 return test_amdgpu_crtc->pll_id;
0356 }
0357 }
0358 return ATOM_PPLL_INVALID;
0359 }