0001
0002
0003
0004
0005
0006 #include <linux/clk-provider.h>
0007 #include <linux/platform_device.h>
0008 #include <dt-bindings/phy/phy.h>
0009
0010 #include "dsi_phy.h"
0011
/*
 * Signed DIV_ROUND_UP: rounds the quotient away from zero for both
 * positive and negative numerators (plain DIV_ROUND_UP is wrong for
 * n < 0).  Assumes d > 0, as used throughout this file.
 */
#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
0014
0015 static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
0016 s32 min_result, bool even)
0017 {
0018 s32 v;
0019
0020 v = (tmax - tmin) * percent;
0021 v = S_DIV_ROUND_UP(v, 100) + tmin;
0022 if (even && (v & 0x1))
0023 return max_t(s32, min_result, v - 1);
0024 else
0025 return max_t(s32, min_result, v);
0026 }
0027
/*
 * Derive timing->clk_zero from the already-computed clk_prepare and
 * hs_rqst fields (the caller must set both before calling this).
 *
 * @ui:    bit-clock unit interval, scaled by @coeff
 * @coeff: fixed-point scale applied to the nanosecond constants
 * @pcnt:  interpolation percentage passed to linear_inter()
 *
 * The 300 * coeff term presumably corresponds to the D-PHY 300ns
 * TCLK-PREPARE + TCLK-ZERO minimum — TODO confirm against the spec.
 */
static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					  s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* remaining budget after the time clk_prepare already covers */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		/* NOTE(review): tmax = 511 is assigned but unused here;
		 * the interpolation upper bound is 2 * tmin instead.
		 */
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* round clk_zero up so hs_rqst + clk_prepare + clk_zero is a
	 * multiple of 8
	 */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
0049
/*
 * D-PHY timing calculation for the legacy (28nm-class) PHYs.
 *
 * Nanosecond constants are scaled by @coeff (1000) so the math stays in
 * integer fixed point; @ui (bit-clock unit interval) and @lpx
 * (escape-clock period) are on the same scale.  Each timing field is
 * picked by linear_inter() between a derived min and max, with a
 * per-parameter percentage (pcnt*).
 *
 * Returns 0 on success, -EINVAL if either requested clock rate is zero.
 */
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
			     struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	/* interpolate higher on very fast links (> 1.2Gbps) */
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* fixed-point scale factor */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* unit interval and LP period, in units of (1/coeff) ns */
	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	/* clk_prepare: 38..95ns window (presumably D-PHY TCLK-PREPARE —
	 * confirm against the spec)
	 */
	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	/* hs_rqst: largest odd value not above lpx/ui (floored at 0) */
	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* clk_zero depends on clk_prepare and hs_rqst computed above */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	/* hs_zero has a hard floor of 24 */
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	/* NOTE(review): plain DIV_ROUND_UP here (vs S_DIV_ROUND_UP
	 * elsewhere); temp is always positive so the result is the same.
	 */
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	/* clk_post/clk_pre count in byte clocks (8 * ui); tmax = 63
	 * matches a 6-bit hardware field
	 */
	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
						       false);
	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		/* required value overflows the field: store half and set
		 * clk_pre_inc_by_2 (presumably the HW doubles it — confirm)
		 */
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = true;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = false;
	}

	/* fixed bus-turnaround parameters */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst);

	return 0;
}
0144
/*
 * D-PHY timing calculation, v2 register layout.
 *
 * Same fixed-point scheme as msm_dsi_dphy_timing_calc(), but most
 * fields count in byte clocks (ui_x8 = 8 * ui) and the hardware has
 * extra knobs: half-byte clock enables (hs_halfbyte_en*) and prepare
 * delays (hs_prep_dly*), folded into the math via @val / @val_ckln.
 *
 * Returns 0 on success, -EINVAL if either requested clock rate is zero.
 */
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* fixed-point scale factor */
	s32 hb_en, hb_en_ckln, pd_ckln, pd;
	s32 val, val_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* half-byte clock is never enabled by this driver */
	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;
	/* extra prepare delay only at low bit rates */
	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
	pd_ckln = timing->hs_prep_dly_ckln;
	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
	pd = timing->hs_prep_dly;

	/* combined adjustment terms for data lanes / clock lane */
	val = (hb_en << 2) + (pd << 1);
	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);

	/* unit interval in (1/coeff) ns; ui_x8 is one byte clock */
	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;

	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff - val_ckln * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	/* wider field when the minimum exceeds 8 bits */
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	/* separate request timing for the clock lane */
	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	/* clk_pre must cover the whole clock-lane HS entry sequence */
	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
				(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		/* overflow: store half and flag clk_pre_inc_by_2 */
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	/* fixed bus-turnaround parameters */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}
0260
/*
 * D-PHY timing calculation, v3 register layout.
 *
 * Like _v2 but without the prepare-delay (pd*) adjustment terms: the
 * val / val_ckln corrections are gone and the clk_zero / hs_zero
 * minimums are derived directly from clk_prepare / hs_prepare.
 *
 * Returns 0 on success, -EINVAL if either requested clock rate is zero.
 */
int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* fixed-point scale factor */
	s32 hb_en, hb_en_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* half-byte clock is never enabled by this driver */
	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;

	/* unit interval in (1/coeff) ns; ui_x8 is one byte clock */
	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;

	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	/* wider field when the minimum exceeds 8 bits */
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp / ui_x8) - 1;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	/* separate request timing for the clock lane */
	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	/* clk_pre must cover the whole clock-lane HS entry sequence */
	temp = 8 * ui + (timing->clk_prepare << 3) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
				(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		/* overflow: store half and flag clk_pre_inc_by_2 */
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	/* fixed bus-turnaround parameters */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	/* NOTE(review): hs_prep_dly / hs_prep_dly_ckln are never written
	 * by this function; the DBG below prints whatever the caller left
	 * in those fields.
	 */
	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}
0368
0369 int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
0370 struct msm_dsi_phy_clk_request *clk_req)
0371 {
0372 const unsigned long bit_rate = clk_req->bitclk_rate;
0373 const unsigned long esc_rate = clk_req->escclk_rate;
0374 s32 ui, ui_x8;
0375 s32 tmax, tmin;
0376 s32 pcnt_clk_prep = 50;
0377 s32 pcnt_clk_zero = 2;
0378 s32 pcnt_clk_trail = 30;
0379 s32 pcnt_hs_prep = 50;
0380 s32 pcnt_hs_zero = 10;
0381 s32 pcnt_hs_trail = 30;
0382 s32 pcnt_hs_exit = 10;
0383 s32 coeff = 1000;
0384 s32 hb_en;
0385 s32 temp;
0386
0387 if (!bit_rate || !esc_rate)
0388 return -EINVAL;
0389
0390 hb_en = 0;
0391
0392 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
0393 ui_x8 = ui << 3;
0394
0395
0396
0397
0398
0399
0400 temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
0401 tmin = max_t(s32, temp, 0);
0402 temp = (95 * coeff) / ui_x8;
0403 tmax = max_t(s32, temp, 0);
0404 timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
0405
0406 temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
0407 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
0408 tmax = (tmin > 255) ? 511 : 255;
0409 timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
0410
0411 tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
0412 temp = 105 * coeff + 12 * ui - 20 * coeff;
0413 tmax = (temp + 3 * ui) / ui_x8;
0414 timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
0415
0416 temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
0417 tmin = max_t(s32, temp, 0);
0418 temp = (85 * coeff + 6 * ui) / ui_x8;
0419 tmax = max_t(s32, temp, 0);
0420 timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
0421
0422 temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
0423 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
0424 tmax = 255;
0425 timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
0426
0427 tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
0428 temp = 105 * coeff + 12 * ui - 20 * coeff;
0429 tmax = (temp / ui_x8) - 1;
0430 timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
0431
0432 temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
0433 timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
0434
0435 tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
0436 tmax = 255;
0437 timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
0438
0439
0440
0441
0442 temp = 60 * coeff + 52 * ui + + (timing->hs_trail + 1) * ui_x8;
0443 tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
0444 tmax = 255;
0445 timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
0446
0447
0448
0449
0450
0451
0452 temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
0453 tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
0454 tmax = 255;
0455 timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
0456
0457 DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
0458 timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
0459 timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
0460 timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
0461
0462 return 0;
0463 }
0464
0465 int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
0466 struct msm_dsi_phy_clk_request *clk_req)
0467 {
0468 const unsigned long bit_rate = clk_req->bitclk_rate;
0469 const unsigned long esc_rate = clk_req->escclk_rate;
0470 s32 ui, ui_x7;
0471 s32 tmax, tmin;
0472 s32 coeff = 1000;
0473 s32 temp;
0474
0475 if (!bit_rate || !esc_rate)
0476 return -EINVAL;
0477
0478 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
0479 ui_x7 = ui * 7;
0480
0481 temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
0482 tmin = max_t(s32, temp, 0);
0483 temp = (95 * coeff) / ui_x7;
0484 tmax = max_t(s32, temp, 0);
0485 timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
0486
0487 tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
0488 tmax = 255;
0489 timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
0490
0491 tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
0492 tmax = 255;
0493 timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
0494
0495 tmin = 1;
0496 tmax = 32;
0497 timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
0498
0499 tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
0500 tmax = 64;
0501 timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
0502
0503 DBG("%d, %d, %d, %d, %d",
0504 timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
0505 timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
0506
0507 return 0;
0508 }
0509
0510 static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
0511 {
0512 struct regulator_bulk_data *s = phy->supplies;
0513 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
0514 struct device *dev = &phy->pdev->dev;
0515 int num = phy->cfg->reg_cfg.num;
0516 int i, ret;
0517
0518 for (i = 0; i < num; i++)
0519 s[i].supply = regs[i].name;
0520
0521 ret = devm_regulator_bulk_get(dev, num, s);
0522 if (ret < 0) {
0523 if (ret != -EPROBE_DEFER) {
0524 DRM_DEV_ERROR(dev,
0525 "%s: failed to init regulator, ret=%d\n",
0526 __func__, ret);
0527 }
0528
0529 return ret;
0530 }
0531
0532 return 0;
0533 }
0534
0535 static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
0536 {
0537 struct regulator_bulk_data *s = phy->supplies;
0538 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
0539 int num = phy->cfg->reg_cfg.num;
0540 int i;
0541
0542 DBG("");
0543 for (i = num - 1; i >= 0; i--)
0544 if (regs[i].disable_load >= 0)
0545 regulator_set_load(s[i].consumer, regs[i].disable_load);
0546
0547 regulator_bulk_disable(num, s);
0548 }
0549
/*
 * Power up all PHY supplies: set each configured enable_load, then
 * enable the whole bulk set.  On failure, restore disable_load on the
 * supplies already touched and return the error.
 */
static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		/* negative enable_load means "do not touch the load" */
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						 regs[i].enable_load);
			if (ret < 0) {
				DRM_DEV_ERROR(dev,
					"regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	/* unwind loads already set; i-- skips the entry that failed.
	 * NOTE(review): unlike dsi_phy_regulator_disable(), there is no
	 * disable_load >= 0 check here — confirm that is intentional.
	 */
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}
0585
0586 static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
0587 {
0588 struct device *dev = &phy->pdev->dev;
0589 int ret;
0590
0591 pm_runtime_get_sync(dev);
0592
0593 ret = clk_prepare_enable(phy->ahb_clk);
0594 if (ret) {
0595 DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
0596 pm_runtime_put_sync(dev);
0597 }
0598
0599 return ret;
0600 }
0601
/* Undo dsi_phy_enable_resource(): gate the AHB clock, drop the PM ref. */
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put(&phy->pdev->dev);
}
0607
/*
 * Device-tree match table: maps each supported compatible string to the
 * per-generation PHY configuration.  Entries are compiled in only when
 * the corresponding Kconfig option is enabled.
 */
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
	  .data = &dsi_phy_28nm_hpm_famb_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
	{ .compatible = "qcom,dsi-phy-14nm",
	  .data = &dsi_phy_14nm_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-660",
	  .data = &dsi_phy_14nm_660_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-8953",
	  .data = &dsi_phy_14nm_8953_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
	{ .compatible = "qcom,dsi-phy-10nm",
	  .data = &dsi_phy_10nm_cfgs },
	{ .compatible = "qcom,dsi-phy-10nm-8998",
	  .data = &dsi_phy_10nm_8998_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
	{ .compatible = "qcom,dsi-phy-7nm",
	  .data = &dsi_phy_7nm_cfgs },
	{ .compatible = "qcom,dsi-phy-7nm-8150",
	  .data = &dsi_phy_7nm_8150_cfgs },
	{ .compatible = "qcom,sc7280-dsi-phy-7nm",
	  .data = &dsi_phy_7nm_7280_cfgs },
#endif
	{}
};
0649
0650
0651
0652
0653
0654
0655 static int dsi_phy_get_id(struct msm_dsi_phy *phy)
0656 {
0657 struct platform_device *pdev = phy->pdev;
0658 const struct msm_dsi_phy_cfg *cfg = phy->cfg;
0659 struct resource *res;
0660 int i;
0661
0662 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
0663 if (!res)
0664 return -EINVAL;
0665
0666 for (i = 0; i < cfg->num_dsi_phy; i++) {
0667 if (cfg->io_start[i] == res->start)
0668 return i;
0669 }
0670
0671 return -EINVAL;
0672 }
0673
0674 static int dsi_phy_driver_probe(struct platform_device *pdev)
0675 {
0676 struct msm_dsi_phy *phy;
0677 struct device *dev = &pdev->dev;
0678 u32 phy_type;
0679 int ret;
0680
0681 phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
0682 if (!phy)
0683 return -ENOMEM;
0684
0685 phy->provided_clocks = devm_kzalloc(dev,
0686 struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
0687 GFP_KERNEL);
0688 if (!phy->provided_clocks)
0689 return -ENOMEM;
0690
0691 phy->provided_clocks->num = NUM_PROVIDED_CLKS;
0692
0693 phy->cfg = of_device_get_match_data(&pdev->dev);
0694 if (!phy->cfg)
0695 return -ENODEV;
0696
0697 phy->pdev = pdev;
0698
0699 phy->id = dsi_phy_get_id(phy);
0700 if (phy->id < 0) {
0701 ret = phy->id;
0702 DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
0703 __func__, ret);
0704 goto fail;
0705 }
0706
0707 phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
0708 "qcom,dsi-phy-regulator-ldo-mode");
0709 if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
0710 phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
0711
0712 phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
0713 if (IS_ERR(phy->base)) {
0714 DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
0715 ret = -ENOMEM;
0716 goto fail;
0717 }
0718
0719 phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
0720 if (IS_ERR(phy->pll_base)) {
0721 DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
0722 ret = -ENOMEM;
0723 goto fail;
0724 }
0725
0726 if (phy->cfg->has_phy_lane) {
0727 phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
0728 if (IS_ERR(phy->lane_base)) {
0729 DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
0730 ret = -ENOMEM;
0731 goto fail;
0732 }
0733 }
0734
0735 if (phy->cfg->has_phy_regulator) {
0736 phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
0737 if (IS_ERR(phy->reg_base)) {
0738 DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
0739 ret = -ENOMEM;
0740 goto fail;
0741 }
0742 }
0743
0744 if (phy->cfg->ops.parse_dt_properties) {
0745 ret = phy->cfg->ops.parse_dt_properties(phy);
0746 if (ret)
0747 goto fail;
0748 }
0749
0750 ret = dsi_phy_regulator_init(phy);
0751 if (ret)
0752 goto fail;
0753
0754 phy->ahb_clk = msm_clk_get(pdev, "iface");
0755 if (IS_ERR(phy->ahb_clk)) {
0756 DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
0757 ret = PTR_ERR(phy->ahb_clk);
0758 goto fail;
0759 }
0760
0761
0762
0763
0764 ret = dsi_phy_enable_resource(phy);
0765 if (ret)
0766 goto fail;
0767
0768 if (phy->cfg->ops.pll_init) {
0769 ret = phy->cfg->ops.pll_init(phy);
0770 if (ret) {
0771 DRM_DEV_INFO(dev,
0772 "%s: pll init failed: %d, need separate pll clk driver\n",
0773 __func__, ret);
0774 goto fail;
0775 }
0776 }
0777
0778 ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
0779 phy->provided_clocks);
0780 if (ret) {
0781 DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
0782 goto fail;
0783 }
0784
0785 dsi_phy_disable_resource(phy);
0786
0787 platform_set_drvdata(pdev, phy);
0788
0789 return 0;
0790
0791 fail:
0792 return ret;
0793 }
0794
/* Platform driver glue; bound via the DT match table above. */
static struct platform_driver dsi_phy_platform_driver = {
	.probe      = dsi_phy_driver_probe,
	.driver     = {
		.name   = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};
0802
/* Register the PHY platform driver (called from the msm driver init). */
void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}
0807
/* Counterpart of msm_dsi_phy_driver_register(). */
void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}
0812
/*
 * Power up and configure the PHY for @clk_req, copying the resulting
 * shared timings (clk_pre/clk_post/...) out to @shared_timings for the
 * DSI host.
 *
 * Acquires, in order: AHB clock + PM ref, regulators, then the
 * per-generation enable op; failures unwind in exact reverse order via
 * the goto chain below.
 *
 * Returns 0 on success, -EINVAL for a NULL phy or missing enable op,
 * or the first error encountered.
 */
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
			struct msm_dsi_phy_clk_request *clk_req,
			struct msm_dsi_phy_shared_timings *shared_timings)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	dev = &phy->pdev->dev;

	ret = dsi_phy_enable_resource(phy);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
			__func__, ret);
		goto res_en_fail;
	}

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
			__func__, ret);
		goto reg_en_fail;
	}

	ret = phy->cfg->ops.enable(phy, clk_req);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
		goto phy_en_fail;
	}

	/* hand the computed shared timings back to the caller */
	memcpy(shared_timings, &phy->timing.shared_timings,
	       sizeof(*shared_timings));

	/* Restore saved PLL state unless this PHY is the clock slave —
	 * presumably because only the master's PLL feeds the link clocks
	 * after a PHY reset; confirm against the PLL save/restore ops.
	 */
	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
		ret = msm_dsi_phy_pll_restore_state(phy);
		if (ret) {
			DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
				__func__, ret);
			goto pll_restor_fail;
		}
	}

	return 0;

pll_restor_fail:
	if (phy->cfg->ops.disable)
		phy->cfg->ops.disable(phy);
phy_en_fail:
	dsi_phy_regulator_disable(phy);
reg_en_fail:
	dsi_phy_disable_resource(phy);
res_en_fail:
	return ret;
}
0875
0876 void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
0877 {
0878 if (!phy || !phy->cfg->ops.disable)
0879 return;
0880
0881 phy->cfg->ops.disable(phy);
0882
0883 dsi_phy_regulator_disable(phy);
0884 dsi_phy_disable_resource(phy);
0885 }
0886
0887 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
0888 enum msm_dsi_phy_usecase uc)
0889 {
0890 if (phy)
0891 phy->usecase = uc;
0892 }
0893
0894
0895 bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
0896 {
0897 if (!phy || !phy->cfg->ops.set_continuous_clock)
0898 return false;
0899
0900 return phy->cfg->ops.set_continuous_clock(phy, enable);
0901 }
0902
0903 void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
0904 {
0905 if (phy->cfg->ops.save_pll_state) {
0906 phy->cfg->ops.save_pll_state(phy);
0907 phy->state_saved = true;
0908 }
0909 }
0910
0911 int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
0912 {
0913 int ret;
0914
0915 if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
0916 ret = phy->cfg->ops.restore_pll_state(phy);
0917 if (ret)
0918 return ret;
0919
0920 phy->state_saved = false;
0921 }
0922
0923 return 0;
0924 }
0925
/*
 * Add the PHY's register regions to a display snapshot for debugging:
 * always the main PHY block, plus the PLL (only while pll_on — reading
 * it while off is presumably unsafe or meaningless; confirm), and the
 * lane / regulator blocks when they were mapped at probe.
 */
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
{
	msm_disp_snapshot_add_block(disp_state,
				    phy->base_size, phy->base,
				    "dsi%d_phy", phy->id);

	/* Do not try to dump the PLL registers unless the PLL is running */
	if (phy->pll_on)
		msm_disp_snapshot_add_block(disp_state,
					    phy->pll_size, phy->pll_base,
					    "dsi%d_pll", phy->id);

	if (phy->lane_base)
		msm_disp_snapshot_add_block(disp_state,
					    phy->lane_size, phy->lane_base,
					    "dsi%d_lane", phy->id);

	if (phy->reg_base)
		msm_disp_snapshot_add_block(disp_state,
					    phy->reg_size, phy->reg_base,
					    "dsi%d_reg", phy->id);
}